diff options
378 files changed, 9728 insertions, 6537 deletions
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h index 521156a319..87bf1c4d43 100644 --- a/cmdline/cmdline_types.h +++ b/cmdline/cmdline_types.h @@ -290,26 +290,42 @@ struct CmdlineType<double> : CmdlineTypeParser<double> { static const char* Name() { return "double"; } }; +template <typename T> +static inline CmdlineParseResult<T> ParseNumeric(const std::string& str) { + static_assert(sizeof(T) < sizeof(long long int), // NOLINT [runtime/int] [4] + "Current support is restricted."); + + const char* begin = str.c_str(); + char* end; + + // Parse into a larger type (long long) because we can't use strtoul + // since it silently converts negative values into unsigned long and doesn't set errno. + errno = 0; + long long int result = strtoll(begin, &end, 10); // NOLINT [runtime/int] [4] + if (begin == end || *end != '\0' || errno == EINVAL) { + return CmdlineParseResult<T>::Failure("Failed to parse integer from " + str); + } else if ((errno == ERANGE) || // NOLINT [runtime/int] [4] + result < std::numeric_limits<T>::min() || result > std::numeric_limits<T>::max()) { + return CmdlineParseResult<T>::OutOfRange( + "Failed to parse integer from " + str + "; out of range"); + } + + return CmdlineParseResult<T>::Success(static_cast<T>(result)); +} + template <> struct CmdlineType<unsigned int> : CmdlineTypeParser<unsigned int> { Result Parse(const std::string& str) { - const char* begin = str.c_str(); - char* end; + return ParseNumeric<unsigned int>(str); + } - // Parse into a larger type (long long) because we can't use strtoul - // since it silently converts negative values into unsigned long and doesn't set errno. 
- errno = 0; - long long int result = strtoll(begin, &end, 10); // NOLINT [runtime/int] [4] - if (begin == end || *end != '\0' || errno == EINVAL) { - return Result::Failure("Failed to parse integer from " + str); - } else if ((errno == ERANGE) || // NOLINT [runtime/int] [4] - result < std::numeric_limits<int>::min() - || result > std::numeric_limits<unsigned int>::max() || result < 0) { - return Result::OutOfRange( - "Failed to parse integer from " + str + "; out of unsigned int range"); - } + static const char* Name() { return "unsigned integer"; } +}; - return Result::Success(static_cast<unsigned int>(result)); +template <> +struct CmdlineType<int> : CmdlineTypeParser<int> { + Result Parse(const std::string& str) { + return ParseNumeric<int>(str); } - static const char* Name() { return "unsigned integer"; } + static const char* Name() { return "integer"; } diff --git a/compiler/Android.bp b/compiler/Android.bp index 59ca4c7abf..1e4cdf2bd5 100644 --- a/compiler/Android.bp +++ b/compiler/Android.bp @@ -193,6 +193,10 @@ art_cc_defaults { "liblzma", ], include_dirs: ["art/disassembler"], + header_libs: [ + "art_cmdlineparser_headers", // For compiler_options. 
+ ], + export_include_dirs: ["."], } diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc index 538845de19..b6cedff28a 100644 --- a/compiler/driver/compiler_options.cc +++ b/compiler/driver/compiler_options.cc @@ -18,7 +18,13 @@ #include <fstream> +#include "android-base/stringprintf.h" + +#include "base/variant_map.h" +#include "cmdline_parser.h" +#include "compiler_options_map-inl.h" #include "runtime.h" +#include "simple_compiler_options_map.h" namespace art { @@ -71,115 +77,50 @@ bool CompilerOptions::EmitRunTimeChecksInDebugMode() const { (kIsTargetBuild || IsCoreImage() || Runtime::Current()->UseJitCompilation()); } -void CompilerOptions::ParseHugeMethodMax(const StringPiece& option, UsageFn Usage) { - ParseUintOption(option, "--huge-method-max", &huge_method_threshold_, Usage); -} - -void CompilerOptions::ParseLargeMethodMax(const StringPiece& option, UsageFn Usage) { - ParseUintOption(option, "--large-method-max", &large_method_threshold_, Usage); -} - -void CompilerOptions::ParseSmallMethodMax(const StringPiece& option, UsageFn Usage) { - ParseUintOption(option, "--small-method-max", &small_method_threshold_, Usage); -} - -void CompilerOptions::ParseTinyMethodMax(const StringPiece& option, UsageFn Usage) { - ParseUintOption(option, "--tiny-method-max", &tiny_method_threshold_, Usage); -} - -void CompilerOptions::ParseNumDexMethods(const StringPiece& option, UsageFn Usage) { - ParseUintOption(option, "--num-dex-methods", &num_dex_methods_threshold_, Usage); -} - -void CompilerOptions::ParseInlineMaxCodeUnits(const StringPiece& option, UsageFn Usage) { - ParseUintOption(option, "--inline-max-code-units", &inline_max_code_units_, Usage); -} - -void CompilerOptions::ParseDumpInitFailures(const StringPiece& option, - UsageFn Usage ATTRIBUTE_UNUSED) { - DCHECK(option.starts_with("--dump-init-failures=")); - std::string file_name = option.substr(strlen("--dump-init-failures=")).data(); - init_failure_output_.reset(new 
std::ofstream(file_name)); +bool CompilerOptions::ParseDumpInitFailures(const std::string& option, std::string* error_msg) { + init_failure_output_.reset(new std::ofstream(option)); if (init_failure_output_.get() == nullptr) { - LOG(ERROR) << "Failed to allocate ofstream"; + *error_msg = "Failed to construct std::ofstream"; + return false; } else if (init_failure_output_->fail()) { - LOG(ERROR) << "Failed to open " << file_name << " for writing the initialization " - << "failures."; + *error_msg = android::base::StringPrintf( + "Failed to open %s for writing the initialization failures.", option.c_str()); init_failure_output_.reset(); + return false; } + return true; } -void CompilerOptions::ParseRegisterAllocationStrategy(const StringPiece& option, - UsageFn Usage) { - DCHECK(option.starts_with("--register-allocation-strategy=")); - StringPiece choice = option.substr(strlen("--register-allocation-strategy=")).data(); - if (choice == "linear-scan") { +bool CompilerOptions::ParseRegisterAllocationStrategy(const std::string& option, + std::string* error_msg) { + if (option == "linear-scan") { register_allocation_strategy_ = RegisterAllocator::Strategy::kRegisterAllocatorLinearScan; - } else if (choice == "graph-color") { + } else if (option == "graph-color") { register_allocation_strategy_ = RegisterAllocator::Strategy::kRegisterAllocatorGraphColor; } else { - Usage("Unrecognized register allocation strategy. Try linear-scan, or graph-color."); + *error_msg = "Unrecognized register allocation strategy. 
Try linear-scan, or graph-color."; + return false; } + return true; } -bool CompilerOptions::ParseCompilerOption(const StringPiece& option, UsageFn Usage) { - if (option.starts_with("--compiler-filter=")) { - const char* compiler_filter_string = option.substr(strlen("--compiler-filter=")).data(); - if (!CompilerFilter::ParseCompilerFilter(compiler_filter_string, &compiler_filter_)) { - Usage("Unknown --compiler-filter value %s", compiler_filter_string); - } - } else if (option == "--compile-pic") { - compile_pic_ = true; - } else if (option.starts_with("--huge-method-max=")) { - ParseHugeMethodMax(option, Usage); - } else if (option.starts_with("--large-method-max=")) { - ParseLargeMethodMax(option, Usage); - } else if (option.starts_with("--small-method-max=")) { - ParseSmallMethodMax(option, Usage); - } else if (option.starts_with("--tiny-method-max=")) { - ParseTinyMethodMax(option, Usage); - } else if (option.starts_with("--num-dex-methods=")) { - ParseNumDexMethods(option, Usage); - } else if (option.starts_with("--inline-max-code-units=")) { - ParseInlineMaxCodeUnits(option, Usage); - } else if (option == "--generate-debug-info" || option == "-g") { - generate_debug_info_ = true; - } else if (option == "--no-generate-debug-info") { - generate_debug_info_ = false; - } else if (option == "--generate-mini-debug-info") { - generate_mini_debug_info_ = true; - } else if (option == "--no-generate-mini-debug-info") { - generate_mini_debug_info_ = false; - } else if (option == "--generate-build-id") { - generate_build_id_ = true; - } else if (option == "--no-generate-build-id") { - generate_build_id_ = false; - } else if (option == "--debuggable") { - debuggable_ = true; - } else if (option.starts_with("--top-k-profile-threshold=")) { - ParseDouble(option.data(), '=', 0.0, 100.0, &top_k_profile_threshold_, Usage); - } else if (option == "--abort-on-hard-verifier-error") { - abort_on_hard_verifier_failure_ = true; - } else if (option == 
"--no-abort-on-hard-verifier-error") { - abort_on_hard_verifier_failure_ = false; - } else if (option.starts_with("--dump-init-failures=")) { - ParseDumpInitFailures(option, Usage); - } else if (option.starts_with("--dump-cfg=")) { - dump_cfg_file_name_ = option.substr(strlen("--dump-cfg=")).data(); - } else if (option == "--dump-cfg-append") { - dump_cfg_append_ = true; - } else if (option.starts_with("--register-allocation-strategy=")) { - ParseRegisterAllocationStrategy(option, Usage); - } else if (option.starts_with("--verbose-methods=")) { - // TODO: rather than switch off compiler logging, make all VLOG(compiler) messages - // conditional on having verbose methods. - gLogVerbosity.compiler = false; - Split(option.substr(strlen("--verbose-methods=")).ToString(), ',', &verbose_methods_); - } else { - // Option not recognized. +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wframe-larger-than=" + +bool CompilerOptions::ParseCompilerOptions(const std::vector<std::string>& options, + bool ignore_unrecognized, + std::string* error_msg) { + auto parser = CreateSimpleParser(ignore_unrecognized); + CmdlineResult parse_result = parser.Parse(options); + if (!parse_result.IsSuccess()) { + *error_msg = parse_result.GetMessage(); return false; } - return true; + + SimpleParseArgumentMap args = parser.ReleaseArgumentsMap(); + return ReadCompilerOptions(args, this, error_msg); } +#pragma GCC diagnostic pop + } // namespace art diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h index a9372c4844..311dbd569e 100644 --- a/compiler/driver/compiler_options.h +++ b/compiler/driver/compiler_options.h @@ -231,7 +231,9 @@ class CompilerOptions FINAL { return no_inline_from_; } - bool ParseCompilerOption(const StringPiece& option, UsageFn Usage); + bool ParseCompilerOptions(const std::vector<std::string>& options, + bool ignore_unrecognized, + std::string* error_msg); void SetNonPic() { compile_pic_ = false; @@ -258,7 +260,7 @@ class 
CompilerOptions FINAL { } private: - void ParseDumpInitFailures(const StringPiece& option, UsageFn Usage); + bool ParseDumpInitFailures(const std::string& option, std::string* error_msg); void ParseDumpCfgPasses(const StringPiece& option, UsageFn Usage); void ParseInlineMaxCodeUnits(const StringPiece& option, UsageFn Usage); void ParseNumDexMethods(const StringPiece& option, UsageFn Usage); @@ -266,7 +268,7 @@ class CompilerOptions FINAL { void ParseSmallMethodMax(const StringPiece& option, UsageFn Usage); void ParseLargeMethodMax(const StringPiece& option, UsageFn Usage); void ParseHugeMethodMax(const StringPiece& option, UsageFn Usage); - void ParseRegisterAllocationStrategy(const StringPiece& option, UsageFn Usage); + bool ParseRegisterAllocationStrategy(const std::string& option, std::string* error_msg); CompilerFilter::Filter compiler_filter_; size_t huge_method_threshold_; @@ -327,6 +329,9 @@ class CompilerOptions FINAL { friend class CommonCompilerTest; friend class verifier::VerifierDepsTest; + template <class Base> + friend bool ReadCompilerOptions(Base& map, CompilerOptions* options, std::string* error_msg); + DISALLOW_COPY_AND_ASSIGN(CompilerOptions); }; diff --git a/compiler/driver/compiler_options_map-inl.h b/compiler/driver/compiler_options_map-inl.h new file mode 100644 index 0000000000..9cb818a270 --- /dev/null +++ b/compiler/driver/compiler_options_map-inl.h @@ -0,0 +1,159 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_INL_H_ +#define ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_INL_H_ + +#include "compiler_options_map.h" + +#include <memory> + +#include "android-base/logging.h" +#include "android-base/macros.h" +#include "android-base/stringprintf.h" + +#include "base/macros.h" +#include "cmdline_parser.h" +#include "compiler_options.h" + +namespace art { + +template <class Base> +inline bool ReadCompilerOptions(Base& map, CompilerOptions* options, std::string* error_msg) { + if (map.Exists(Base::CompilerFilter)) { + CompilerFilter::Filter compiler_filter; + if (!CompilerFilter::ParseCompilerFilter(map.Get(Base::CompilerFilter)->c_str(), + &compiler_filter)) { + *error_msg = android::base::StringPrintf("Unknown --compiler-filter value %s", + map.Get(Base::CompilerFilter)->c_str()); + return false; + } + options->SetCompilerFilter(compiler_filter); + } + if (map.Exists(Base::PIC)) { + options->compile_pic_ = true; + } + map.AssignIfExists(Base::HugeMethodMaxThreshold, &options->huge_method_threshold_); + map.AssignIfExists(Base::LargeMethodMaxThreshold, &options->large_method_threshold_); + map.AssignIfExists(Base::SmallMethodMaxThreshold, &options->small_method_threshold_); + map.AssignIfExists(Base::TinyMethodMaxThreshold, &options->tiny_method_threshold_); + map.AssignIfExists(Base::NumDexMethodsThreshold, &options->num_dex_methods_threshold_); + map.AssignIfExists(Base::InlineMaxCodeUnitsThreshold, &options->inline_max_code_units_); + map.AssignIfExists(Base::GenerateDebugInfo, &options->generate_debug_info_); + map.AssignIfExists(Base::GenerateMiniDebugInfo, &options->generate_mini_debug_info_); + map.AssignIfExists(Base::GenerateBuildID, &options->generate_build_id_); + if (map.Exists(Base::Debuggable)) { + options->debuggable_ = true; + } + map.AssignIfExists(Base::TopKProfileThreshold, 
&options->top_k_profile_threshold_); + map.AssignIfExists(Base::AbortOnHardVerifierFailure, &options->abort_on_hard_verifier_failure_); + if (map.Exists(Base::DumpInitFailures)) { + if (!options->ParseDumpInitFailures(*map.Get(Base::DumpInitFailures), error_msg)) { + return false; + } + } + map.AssignIfExists(Base::DumpCFG, &options->dump_cfg_file_name_); + if (map.Exists(Base::DumpCFGAppend)) { + options->dump_cfg_append_ = true; + } + if (map.Exists(Base::RegisterAllocationStrategy)) { + if (!options->ParseRegisterAllocationStrategy(*map.Get(Base::RegisterAllocationStrategy), error_msg)) { + return false; + } + } + map.AssignIfExists(Base::VerboseMethods, &options->verbose_methods_); + + return true; +} + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wframe-larger-than=" + +template <typename Map, typename Builder> +inline void AddCompilerOptionsArgumentParserOptions(Builder& b) { + b. + Define("--compiler-filter=_") + .template WithType<std::string>() + .IntoKey(Map::CompilerFilter) + + .Define("--compile-pic") + .IntoKey(Map::PIC) + + .Define("--huge-method-max=_") + .template WithType<unsigned int>() + .IntoKey(Map::HugeMethodMaxThreshold) + .Define("--large-method-max=_") + .template WithType<unsigned int>() + .IntoKey(Map::LargeMethodMaxThreshold) + .Define("--small-method-max=_") + .template WithType<unsigned int>() + .IntoKey(Map::SmallMethodMaxThreshold) + .Define("--tiny-method-max=_") + .template WithType<unsigned int>() + .IntoKey(Map::TinyMethodMaxThreshold) + .Define("--num-dex-methods=_") + .template WithType<unsigned int>() + .IntoKey(Map::NumDexMethodsThreshold) + .Define("--inline-max-code-units=_") + .template WithType<unsigned int>() + .IntoKey(Map::InlineMaxCodeUnitsThreshold) + + .Define({"--generate-debug-info", "-g", "--no-generate-debug-info"}) + .WithValues({true, true, false}) + .IntoKey(Map::GenerateDebugInfo) + .Define({"--generate-mini-debug-info", "--no-generate-mini-debug-info"}) + .WithValues({true, false}) + 
.IntoKey(Map::GenerateMiniDebugInfo) + + .Define({"--generate-build-id", "--no-generate-build-id"}) + .WithValues({true, false}) + .IntoKey(Map::GenerateBuildID) + + .Define("--debuggable") + .IntoKey(Map::Debuggable) + + .Define("--top-k-profile-threshold=_") + .template WithType<double>().WithRange(0.0, 100.0) + .IntoKey(Map::TopKProfileThreshold) + + .Define({"--abort-on-hard-verifier-error", "--no-abort-on-hard-verifier-error"}) + .WithValues({true, false}) + .IntoKey(Map::AbortOnHardVerifierFailure) + + .Define("--dump-init-failures=_") + .template WithType<std::string>() + .IntoKey(Map::DumpInitFailures) + + .Define("--dump-cfg=_") + .template WithType<std::string>() + .IntoKey(Map::DumpCFG) + .Define("--dump-cfg-append") + .IntoKey(Map::DumpCFGAppend) + + .Define("--register-allocation-strategy=_") + .template WithType<std::string>() + .IntoKey(Map::RegisterAllocationStrategy) + + .Define("--verbose-methods=_") + .template WithType<ParseStringList<','>>() + .IntoKey(Map::VerboseMethods); +} + +#pragma GCC diagnostic pop + +} // namespace art + +#endif // ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_INL_H_ diff --git a/compiler/driver/compiler_options_map-storage.h b/compiler/driver/compiler_options_map-storage.h new file mode 100644 index 0000000000..756598de05 --- /dev/null +++ b/compiler/driver/compiler_options_map-storage.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_STORAGE_H_ +#define ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_STORAGE_H_ + +// Assumes: +// * #include "compiler_options_map.h" +// * namespace art +// +// Usage: +// #define COMPILER_OPTIONS_MAP_TYPE TheTypeOfTheMap +// #define COMPILER_OPTIONS_MAP_KEY_TYPE TheTypeOfTheMapsKey +// #include "driver/compiler_options_map-storage.h + +#ifndef COMPILER_OPTIONS_MAP_TYPE +#error "Expected COMPILER_OPTIONS_MAP_TYPE" +#endif + +#ifndef COMPILER_OPTIONS_MAP_KEY_TYPE +#error "Expected COMPILER_OPTIONS_MAP_KEY_TYPE" +#endif + +#define COMPILER_OPTIONS_KEY(Type, Name, ...) \ + template <typename Base, template <typename TV> class KeyType> \ + const KeyType<Type> CompilerOptionsMap<Base, KeyType>::Name {__VA_ARGS__}; // NOLINT [readability/braces] [4] +#include <driver/compiler_options_map.def> + +template struct CompilerOptionsMap<COMPILER_OPTIONS_MAP_TYPE, COMPILER_OPTIONS_MAP_KEY_TYPE>; + +#undef COMPILER_OPTIONS_MAP_TYPE +#undef COMPILER_OPTIONS_MAP_KEY_TYPE + +#endif // ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_STORAGE_H_ +#undef ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_STORAGE_H_ // Guard is only for cpplint diff --git a/compiler/driver/compiler_options_map.def b/compiler/driver/compiler_options_map.def new file mode 100644 index 0000000000..570bc5aca7 --- /dev/null +++ b/compiler/driver/compiler_options_map.def @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef COMPILER_OPTIONS_KEY +#error "Please #define COMPILER_OPTIONS_KEY before #including this file" +#define COMPILER_OPTIONS_KEY(...) // Don't display errors in this file in IDEs. +#endif + +// This file defines the list of keys for CompilerOptionsMap. +// These can be used with CompilerOptionsMap.Get/Set/etc, once that template class has been +// instantiated. +// +// Column Descriptions: +// <<Type>> <<Key Name>> (<<Default Value>>) +// +// Default values are only used by Map::GetOrDefault(K<T>). +// If a default value is omitted here, T{} is used as the default value, which is +// almost-always the value of the type as if it was memset to all 0. +// +// Please keep the columns aligned if possible when adding new rows. +// + +// Parse-able keys from the command line. + +// TODO: Add type parser. +COMPILER_OPTIONS_KEY (std::string, CompilerFilter) +COMPILER_OPTIONS_KEY (Unit, PIC) +COMPILER_OPTIONS_KEY (unsigned int, HugeMethodMaxThreshold) +COMPILER_OPTIONS_KEY (unsigned int, LargeMethodMaxThreshold) +COMPILER_OPTIONS_KEY (unsigned int, SmallMethodMaxThreshold) +COMPILER_OPTIONS_KEY (unsigned int, TinyMethodMaxThreshold) +COMPILER_OPTIONS_KEY (unsigned int, NumDexMethodsThreshold) +COMPILER_OPTIONS_KEY (unsigned int, InlineMaxCodeUnitsThreshold) +COMPILER_OPTIONS_KEY (bool, GenerateDebugInfo) +COMPILER_OPTIONS_KEY (bool, GenerateMiniDebugInfo) +COMPILER_OPTIONS_KEY (bool, GenerateBuildID) +COMPILER_OPTIONS_KEY (Unit, Debuggable) +COMPILER_OPTIONS_KEY (double, TopKProfileThreshold) +COMPILER_OPTIONS_KEY (bool, AbortOnHardVerifierFailure) +COMPILER_OPTIONS_KEY (std::string, DumpInitFailures) +COMPILER_OPTIONS_KEY (std::string, DumpCFG) +COMPILER_OPTIONS_KEY (Unit, DumpCFGAppend) +// TODO: Add type parser. 
+COMPILER_OPTIONS_KEY (std::string, RegisterAllocationStrategy) +COMPILER_OPTIONS_KEY (ParseStringList<','>, VerboseMethods) + +#undef COMPILER_OPTIONS_KEY diff --git a/compiler/driver/compiler_options_map.h b/compiler/driver/compiler_options_map.h new file mode 100644 index 0000000000..b9bc8b6ea1 --- /dev/null +++ b/compiler/driver/compiler_options_map.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_H_ +#define ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_H_ + +#include <string> +#include <vector> + +#include "base/variant_map.h" +#include "cmdline_types.h" + +namespace art { + +// Defines a type-safe heterogeneous key->value map. This is to be used as the base for +// an extended map. +template <typename Base, template <typename TV> class KeyType> +struct CompilerOptionsMap : VariantMap<Base, KeyType> { + // Make the next many usages of Key slightly shorter to type. + template <typename TValue> + using Key = KeyType<TValue>; + + // List of key declarations, shorthand for 'static const Key<T> Name' +#define COMPILER_OPTIONS_KEY(Type, Name, ...) 
static const Key<Type> (Name); +#include "compiler_options_map.def" +}; + +#undef DECLARE_KEY + +} // namespace art + +#endif // ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_H_ diff --git a/compiler/driver/simple_compiler_options_map.h b/compiler/driver/simple_compiler_options_map.h new file mode 100644 index 0000000000..3860da9f66 --- /dev/null +++ b/compiler/driver/simple_compiler_options_map.h @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file declares a completion of the CompilerOptionsMap and should be included into a +// .cc file, only. + +#ifndef ART_COMPILER_DRIVER_SIMPLE_COMPILER_OPTIONS_MAP_H_ +#define ART_COMPILER_DRIVER_SIMPLE_COMPILER_OPTIONS_MAP_H_ + +#include <memory> + +#include "compiler_options_map-inl.h" +#include "base/variant_map.h" + +namespace art { + +template <typename TValue> +struct SimpleParseArgumentMapKey : VariantMapKey<TValue> { + SimpleParseArgumentMapKey() {} + explicit SimpleParseArgumentMapKey(TValue default_value) + : VariantMapKey<TValue>(std::move(default_value)) {} + // Don't ODR-use constexpr default values, which means that Struct::Fields + // that are declared 'static constexpr T Name = Value' don't need to have a matching definition. +}; + +struct SimpleParseArgumentMap : CompilerOptionsMap<SimpleParseArgumentMap, + SimpleParseArgumentMapKey> { + // This 'using' line is necessary to inherit the variadic constructor. 
+ using CompilerOptionsMap<SimpleParseArgumentMap, SimpleParseArgumentMapKey>::CompilerOptionsMap; +}; + +#define COMPILER_OPTIONS_MAP_TYPE SimpleParseArgumentMap +#define COMPILER_OPTIONS_MAP_KEY_TYPE SimpleParseArgumentMapKey +#include "compiler_options_map-storage.h" + +using Parser = CmdlineParser<SimpleParseArgumentMap, SimpleParseArgumentMapKey>; + +static inline Parser CreateSimpleParser(bool ignore_unrecognized) { + std::unique_ptr<Parser::Builder> parser_builder = + std::unique_ptr<Parser::Builder>(new Parser::Builder()); + + AddCompilerOptionsArgumentParserOptions<SimpleParseArgumentMap>(*parser_builder); + + parser_builder->IgnoreUnrecognized(ignore_unrecognized); + + return parser_builder->Build(); +} + +} // namespace art + +#endif // ART_COMPILER_DRIVER_SIMPLE_COMPILER_OPTIONS_MAP_H_ diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc index 511a44af04..5c89869e00 100644 --- a/compiler/jit/jit_compiler.cc +++ b/compiler/jit/jit_compiler.cc @@ -78,21 +78,16 @@ extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t cou } } -// Callers of this method assume it has NO_RETURN. -NO_RETURN static void Usage(const char* fmt, ...) { - va_list ap; - va_start(ap, fmt); - std::string error; - android::base::StringAppendV(&error, fmt, ap); - LOG(FATAL) << error; - va_end(ap); - exit(EXIT_FAILURE); -} - JitCompiler::JitCompiler() { compiler_options_.reset(new CompilerOptions()); - for (const std::string& argument : Runtime::Current()->GetCompilerOptions()) { - compiler_options_->ParseCompilerOption(argument, Usage); + { + std::string error_msg; + if (!compiler_options_->ParseCompilerOptions(Runtime::Current()->GetCompilerOptions(), + true /* ignore_unrecognized */, + &error_msg)) { + LOG(FATAL) << error_msg; + UNREACHABLE(); + } } // JIT is never PIC, no matter what the runtime compiler options specify. 
compiler_options_->SetNonPic(); diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc index 347f4ea9d4..28709a1bbc 100644 --- a/compiler/jni/jni_cfi_test.cc +++ b/compiler/jni/jni_cfi_test.cc @@ -84,7 +84,7 @@ class JNICFITest : public CFITest { callee_save_regs, mr_conv->EntrySpills()); jni_asm->IncreaseFrameSize(32); jni_asm->DecreaseFrameSize(32); - jni_asm->RemoveFrame(frame_size, callee_save_regs); + jni_asm->RemoveFrame(frame_size, callee_save_regs, /* may_suspend */ true); jni_asm->FinalizeCode(); std::vector<uint8_t> actual_asm(jni_asm->CodeSize()); MemoryRegion code(&actual_asm[0], actual_asm.size()); diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc index 292ce1039e..3afd7011ca 100644 --- a/compiler/jni/quick/arm64/calling_convention_arm64.cc +++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc @@ -110,23 +110,31 @@ static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask(); // Calling convention ManagedRegister Arm64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() { // X20 is safe to use as a scratch register: - // - with Baker read barriers, it is reserved as Marking Register, - // and thus does not actually need to be saved/restored; it is - // refreshed on exit (see Arm64JNIMacroAssembler::RemoveFrame); + // - with Baker read barriers (in the case of a non-critical native + // method), it is reserved as Marking Register, and thus does not + // actually need to be saved/restored; it is refreshed on exit + // (see Arm64JNIMacroAssembler::RemoveFrame); // - in other cases, it is saved on entry (in // Arm64JNIMacroAssembler::BuildFrame) and restored on exit (in - // Arm64JNIMacroAssembler::RemoveFrame). + // Arm64JNIMacroAssembler::RemoveFrame). 
This is also expected in + // the case of a critical native method in the Baker read barrier + // configuration, where the value of MR must be preserved across + // the JNI call (as there is no MR refresh in that case). return Arm64ManagedRegister::FromXRegister(X20); } ManagedRegister Arm64JniCallingConvention::InterproceduralScratchRegister() { // X20 is safe to use as a scratch register: - // - with Baker read barriers, it is reserved as Marking Register, - // and thus does not actually need to be saved/restored; it is - // refreshed on exit (see Arm64JNIMacroAssembler::RemoveFrame); + // - with Baker read barriers (in the case of a non-critical native + // method), it is reserved as Marking Register, and thus does not + // actually need to be saved/restored; it is refreshed on exit + // (see Arm64JNIMacroAssembler::RemoveFrame); // - in other cases, it is saved on entry (in // Arm64JNIMacroAssembler::BuildFrame) and restored on exit (in - // Arm64JNIMacroAssembler::RemoveFrame). + // Arm64JNIMacroAssembler::RemoveFrame). This is also expected in + // the case of a critical native method in the Baker read barrier + // configuration, where the value of MR must be preserved across + // the JNI call (as there is no MR refresh in that case). 
return Arm64ManagedRegister::FromXRegister(X20); } diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc index 36a87a89db..42a5f86117 100644 --- a/compiler/jni/quick/calling_convention.cc +++ b/compiler/jni/quick/calling_convention.cc @@ -47,7 +47,7 @@ namespace art { // Managed runtime calling convention std::unique_ptr<ManagedRuntimeCallingConvention> ManagedRuntimeCallingConvention::Create( - ArenaAllocator* arena, + ArenaAllocator* allocator, bool is_static, bool is_synchronized, const char* shorty, @@ -57,35 +57,37 @@ std::unique_ptr<ManagedRuntimeCallingConvention> ManagedRuntimeCallingConvention case kArm: case kThumb2: return std::unique_ptr<ManagedRuntimeCallingConvention>( - new (arena) arm::ArmManagedRuntimeCallingConvention(is_static, is_synchronized, shorty)); + new (allocator) arm::ArmManagedRuntimeCallingConvention( + is_static, is_synchronized, shorty)); #endif #ifdef ART_ENABLE_CODEGEN_arm64 case kArm64: return std::unique_ptr<ManagedRuntimeCallingConvention>( - new (arena) arm64::Arm64ManagedRuntimeCallingConvention( + new (allocator) arm64::Arm64ManagedRuntimeCallingConvention( is_static, is_synchronized, shorty)); #endif #ifdef ART_ENABLE_CODEGEN_mips case kMips: return std::unique_ptr<ManagedRuntimeCallingConvention>( - new (arena) mips::MipsManagedRuntimeCallingConvention( + new (allocator) mips::MipsManagedRuntimeCallingConvention( is_static, is_synchronized, shorty)); #endif #ifdef ART_ENABLE_CODEGEN_mips64 case kMips64: return std::unique_ptr<ManagedRuntimeCallingConvention>( - new (arena) mips64::Mips64ManagedRuntimeCallingConvention( + new (allocator) mips64::Mips64ManagedRuntimeCallingConvention( is_static, is_synchronized, shorty)); #endif #ifdef ART_ENABLE_CODEGEN_x86 case kX86: return std::unique_ptr<ManagedRuntimeCallingConvention>( - new (arena) x86::X86ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty)); + new (allocator) x86::X86ManagedRuntimeCallingConvention( + 
is_static, is_synchronized, shorty)); #endif #ifdef ART_ENABLE_CODEGEN_x86_64 case kX86_64: return std::unique_ptr<ManagedRuntimeCallingConvention>( - new (arena) x86_64::X86_64ManagedRuntimeCallingConvention( + new (allocator) x86_64::X86_64ManagedRuntimeCallingConvention( is_static, is_synchronized, shorty)); #endif default: @@ -146,7 +148,7 @@ bool ManagedRuntimeCallingConvention::IsCurrentParamALong() { // JNI calling convention -std::unique_ptr<JniCallingConvention> JniCallingConvention::Create(ArenaAllocator* arena, +std::unique_ptr<JniCallingConvention> JniCallingConvention::Create(ArenaAllocator* allocator, bool is_static, bool is_synchronized, bool is_critical_native, @@ -157,50 +159,38 @@ std::unique_ptr<JniCallingConvention> JniCallingConvention::Create(ArenaAllocato case kArm: case kThumb2: return std::unique_ptr<JniCallingConvention>( - new (arena) arm::ArmJniCallingConvention(is_static, - is_synchronized, - is_critical_native, - shorty)); + new (allocator) arm::ArmJniCallingConvention( + is_static, is_synchronized, is_critical_native, shorty)); #endif #ifdef ART_ENABLE_CODEGEN_arm64 case kArm64: return std::unique_ptr<JniCallingConvention>( - new (arena) arm64::Arm64JniCallingConvention(is_static, - is_synchronized, - is_critical_native, - shorty)); + new (allocator) arm64::Arm64JniCallingConvention( + is_static, is_synchronized, is_critical_native, shorty)); #endif #ifdef ART_ENABLE_CODEGEN_mips case kMips: return std::unique_ptr<JniCallingConvention>( - new (arena) mips::MipsJniCallingConvention(is_static, - is_synchronized, - is_critical_native, - shorty)); + new (allocator) mips::MipsJniCallingConvention( + is_static, is_synchronized, is_critical_native, shorty)); #endif #ifdef ART_ENABLE_CODEGEN_mips64 case kMips64: return std::unique_ptr<JniCallingConvention>( - new (arena) mips64::Mips64JniCallingConvention(is_static, - is_synchronized, - is_critical_native, - shorty)); + new (allocator) mips64::Mips64JniCallingConvention( + is_static, 
is_synchronized, is_critical_native, shorty)); #endif #ifdef ART_ENABLE_CODEGEN_x86 case kX86: return std::unique_ptr<JniCallingConvention>( - new (arena) x86::X86JniCallingConvention(is_static, - is_synchronized, - is_critical_native, - shorty)); + new (allocator) x86::X86JniCallingConvention( + is_static, is_synchronized, is_critical_native, shorty)); #endif #ifdef ART_ENABLE_CODEGEN_x86_64 case kX86_64: return std::unique_ptr<JniCallingConvention>( - new (arena) x86_64::X86_64JniCallingConvention(is_static, - is_synchronized, - is_critical_native, - shorty)); + new (allocator) x86_64::X86_64JniCallingConvention( + is_static, is_synchronized, is_critical_native, shorty)); #endif default: LOG(FATAL) << "Unknown InstructionSet: " << instruction_set; diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h index 335a2dfa3c..be0bd72a13 100644 --- a/compiler/jni/quick/calling_convention.h +++ b/compiler/jni/quick/calling_convention.h @@ -231,7 +231,7 @@ class CallingConvention : public DeletableArenaObject<kArenaAllocCallingConventi // | { Method* } | <-- SP class ManagedRuntimeCallingConvention : public CallingConvention { public: - static std::unique_ptr<ManagedRuntimeCallingConvention> Create(ArenaAllocator* arena, + static std::unique_ptr<ManagedRuntimeCallingConvention> Create(ArenaAllocator* allocator, bool is_static, bool is_synchronized, const char* shorty, @@ -284,7 +284,7 @@ class ManagedRuntimeCallingConvention : public CallingConvention { // callee saves for frames above this one. 
class JniCallingConvention : public CallingConvention { public: - static std::unique_ptr<JniCallingConvention> Create(ArenaAllocator* arena, + static std::unique_ptr<JniCallingConvention> Create(ArenaAllocator* allocator, bool is_static, bool is_synchronized, bool is_critical_native, diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc index c66a2a62eb..92b5c4d8ff 100644 --- a/compiler/jni/quick/jni_compiler.cc +++ b/compiler/jni/quick/jni_compiler.cc @@ -66,8 +66,8 @@ static void SetNativeParameter(JNIMacroAssembler<kPointerSize>* jni_asm, template <PointerSize kPointerSize> static std::unique_ptr<JNIMacroAssembler<kPointerSize>> GetMacroAssembler( - ArenaAllocator* arena, InstructionSet isa, const InstructionSetFeatures* features) { - return JNIMacroAssembler<kPointerSize>::Create(arena, isa, features); + ArenaAllocator* allocator, InstructionSet isa, const InstructionSetFeatures* features) { + return JNIMacroAssembler<kPointerSize>::Create(allocator, isa, features); } enum class JniEntrypoint { @@ -646,7 +646,10 @@ static CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, // 16. Remove activation - need to restore callee save registers since the GC may have changed // them. DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size)); - __ RemoveFrame(frame_size, callee_save_regs); + // We expect the compiled method to possibly be suspended during its + // execution, except in the case of a CriticalNative method. + bool may_suspend = !is_critical_native; + __ RemoveFrame(frame_size, callee_save_regs, may_suspend); DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size)); // 17. 
Finalize code generation diff --git a/compiler/optimizing/block_builder.h b/compiler/optimizing/block_builder.h index 6adce815f4..4a0f78ce3d 100644 --- a/compiler/optimizing/block_builder.h +++ b/compiler/optimizing/block_builder.h @@ -29,7 +29,7 @@ class HBasicBlockBuilder : public ValueObject { HBasicBlockBuilder(HGraph* graph, const DexFile* const dex_file, const DexFile::CodeItem& code_item) - : arena_(graph->GetArena()), + : arena_(graph->GetAllocator()), graph_(graph), dex_file_(dex_file), code_item_(code_item), diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc index a7f7bce07a..0255e7302c 100644 --- a/compiler/optimizing/bounds_check_elimination.cc +++ b/compiler/optimizing/bounds_check_elimination.cc @@ -513,18 +513,18 @@ class BCEVisitor : public HGraphVisitor { maps_(graph->GetBlocks().size(), ArenaSafeMap<int, ValueRange*>( std::less<int>(), - graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)), - graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)), + graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)), + graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)), first_index_bounds_check_map_( std::less<int>(), - graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)), + graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)), early_exit_loop_( std::less<uint32_t>(), - graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)), + graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)), taken_test_loop_( std::less<uint32_t>(), - graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)), - finite_loop_(graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)), + graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)), + finite_loop_(graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)), has_dom_based_dynamic_bce_(false), initial_block_size_(graph->GetBlocks().size()), 
side_effects_(side_effects), @@ -668,8 +668,8 @@ class BCEVisitor : public HGraphVisitor { if (successor != nullptr) { bool overflow; bool underflow; - ValueRange* new_left_range = new (GetGraph()->GetArena()) ValueRange( - GetGraph()->GetArena(), + ValueRange* new_left_range = new (GetGraph()->GetAllocator()) ValueRange( + GetGraph()->GetAllocator(), left_range->GetBound(), right_range->GetBound().Add(left_compensation, &overflow, &underflow)); if (!overflow && !underflow) { @@ -677,8 +677,8 @@ class BCEVisitor : public HGraphVisitor { new_left_range); } - ValueRange* new_right_range = new (GetGraph()->GetArena()) ValueRange( - GetGraph()->GetArena(), + ValueRange* new_right_range = new (GetGraph()->GetAllocator()) ValueRange( + GetGraph()->GetAllocator(), left_range->GetBound().Add(right_compensation, &overflow, &underflow), right_range->GetBound()); if (!overflow && !underflow) { @@ -750,8 +750,8 @@ class BCEVisitor : public HGraphVisitor { if (overflow || underflow) { return; } - ValueRange* new_range = new (GetGraph()->GetArena()) - ValueRange(GetGraph()->GetArena(), ValueBound::Min(), new_upper); + ValueRange* new_range = new (GetGraph()->GetAllocator()) + ValueRange(GetGraph()->GetAllocator(), ValueBound::Min(), new_upper); ApplyRangeFromComparison(left, block, true_successor, new_range); } @@ -762,8 +762,8 @@ class BCEVisitor : public HGraphVisitor { if (overflow || underflow) { return; } - ValueRange* new_range = new (GetGraph()->GetArena()) - ValueRange(GetGraph()->GetArena(), new_lower, ValueBound::Max()); + ValueRange* new_range = new (GetGraph()->GetAllocator()) + ValueRange(GetGraph()->GetAllocator(), new_lower, ValueBound::Max()); ApplyRangeFromComparison(left, block, false_successor, new_range); } } else if (cond == kCondGT || cond == kCondGE) { @@ -774,8 +774,8 @@ class BCEVisitor : public HGraphVisitor { if (overflow || underflow) { return; } - ValueRange* new_range = new (GetGraph()->GetArena()) - ValueRange(GetGraph()->GetArena(), new_lower, 
ValueBound::Max()); + ValueRange* new_range = new (GetGraph()->GetAllocator()) + ValueRange(GetGraph()->GetAllocator(), new_lower, ValueBound::Max()); ApplyRangeFromComparison(left, block, true_successor, new_range); } @@ -785,8 +785,8 @@ class BCEVisitor : public HGraphVisitor { if (overflow || underflow) { return; } - ValueRange* new_range = new (GetGraph()->GetArena()) - ValueRange(GetGraph()->GetArena(), ValueBound::Min(), new_upper); + ValueRange* new_range = new (GetGraph()->GetAllocator()) + ValueRange(GetGraph()->GetAllocator(), ValueBound::Min(), new_upper); ApplyRangeFromComparison(left, block, false_successor, new_range); } } else if (cond == kCondNE || cond == kCondEQ) { @@ -795,8 +795,8 @@ class BCEVisitor : public HGraphVisitor { // length == [c,d] yields [c, d] along true // length != [c,d] yields [c, d] along false if (!lower.Equals(ValueBound::Min()) || !upper.Equals(ValueBound::Max())) { - ValueRange* new_range = new (GetGraph()->GetArena()) - ValueRange(GetGraph()->GetArena(), lower, upper); + ValueRange* new_range = new (GetGraph()->GetAllocator()) + ValueRange(GetGraph()->GetAllocator(), lower, upper); ApplyRangeFromComparison( left, block, cond == kCondEQ ? true_successor : false_successor, new_range); } @@ -804,8 +804,8 @@ class BCEVisitor : public HGraphVisitor { // length == 0 yields [1, max] along false // length != 0 yields [1, max] along true if (lower.GetConstant() == 0 && upper.GetConstant() == 0) { - ValueRange* new_range = new (GetGraph()->GetArena()) - ValueRange(GetGraph()->GetArena(), ValueBound(nullptr, 1), ValueBound::Max()); + ValueRange* new_range = new (GetGraph()->GetAllocator()) + ValueRange(GetGraph()->GetAllocator(), ValueBound(nullptr, 1), ValueBound::Max()); ApplyRangeFromComparison( left, block, cond == kCondEQ ? false_successor : true_successor, new_range); } @@ -826,7 +826,7 @@ class BCEVisitor : public HGraphVisitor { // Non-constant index. 
ValueBound lower = ValueBound(nullptr, 0); // constant 0 ValueBound upper = ValueBound(array_length, -1); // array_length - 1 - ValueRange array_range(GetGraph()->GetArena(), lower, upper); + ValueRange array_range(GetGraph()->GetAllocator(), lower, upper); // Try index range obtained by dominator-based analysis. ValueRange* index_range = LookupValueRange(index, block); if (index_range != nullptr && index_range->FitsIn(&array_range)) { @@ -875,8 +875,8 @@ class BCEVisitor : public HGraphVisitor { } else { ValueBound lower = ValueBound(nullptr, constant + 1); ValueBound upper = ValueBound::Max(); - ValueRange* range = new (GetGraph()->GetArena()) - ValueRange(GetGraph()->GetArena(), lower, upper); + ValueRange* range = new (GetGraph()->GetAllocator()) + ValueRange(GetGraph()->GetAllocator(), lower, upper); AssignRange(block, array_length, range); } } @@ -938,8 +938,8 @@ class BCEVisitor : public HGraphVisitor { ValueRange* range = nullptr; if (increment == 0) { // Add constant 0. It's really a fixed value. - range = new (GetGraph()->GetArena()) ValueRange( - GetGraph()->GetArena(), + range = new (GetGraph()->GetAllocator()) ValueRange( + GetGraph()->GetAllocator(), ValueBound(initial_value, 0), ValueBound(initial_value, 0)); } else { @@ -959,8 +959,8 @@ class BCEVisitor : public HGraphVisitor { bound = increment > 0 ? ValueBound::Min() : ValueBound::Max(); } } - range = new (GetGraph()->GetArena()) MonotonicValueRange( - GetGraph()->GetArena(), + range = new (GetGraph()->GetAllocator()) MonotonicValueRange( + GetGraph()->GetAllocator(), phi, initial_value, increment, @@ -1039,8 +1039,8 @@ class BCEVisitor : public HGraphVisitor { !ValueBound::WouldAddOverflowOrUnderflow(c0, -c1)) { if ((c0 - c1) <= 0) { // array.length + (c0 - c1) won't overflow/underflow. 
- ValueRange* range = new (GetGraph()->GetArena()) ValueRange( - GetGraph()->GetArena(), + ValueRange* range = new (GetGraph()->GetAllocator()) ValueRange( + GetGraph()->GetAllocator(), ValueBound(nullptr, right_const - upper.GetConstant()), ValueBound(array_length, right_const - lower.GetConstant())); AssignRange(sub->GetBlock(), sub, range); @@ -1087,8 +1087,8 @@ class BCEVisitor : public HGraphVisitor { // than array_length. return; } - ValueRange* range = new (GetGraph()->GetArena()) ValueRange( - GetGraph()->GetArena(), + ValueRange* range = new (GetGraph()->GetAllocator()) ValueRange( + GetGraph()->GetAllocator(), ValueBound(nullptr, std::numeric_limits<int32_t>::min()), ValueBound(left, 0)); AssignRange(instruction->GetBlock(), instruction, range); @@ -1113,8 +1113,8 @@ class BCEVisitor : public HGraphVisitor { if (constant > 0) { // constant serves as a mask so any number masked with it // gets a [0, constant] value range. - ValueRange* range = new (GetGraph()->GetArena()) ValueRange( - GetGraph()->GetArena(), + ValueRange* range = new (GetGraph()->GetAllocator()) ValueRange( + GetGraph()->GetAllocator(), ValueBound(nullptr, 0), ValueBound(nullptr, constant)); AssignRange(instruction->GetBlock(), instruction, range); @@ -1139,8 +1139,8 @@ class BCEVisitor : public HGraphVisitor { // array[i % 10]; // index value range [0, 9] // array[i % -10]; // index value range [0, 9] // } - ValueRange* right_range = new (GetGraph()->GetArena()) ValueRange( - GetGraph()->GetArena(), + ValueRange* right_range = new (GetGraph()->GetAllocator()) ValueRange( + GetGraph()->GetAllocator(), ValueBound(nullptr, 1 - right_const), ValueBound(nullptr, right_const - 1)); @@ -1169,8 +1169,8 @@ class BCEVisitor : public HGraphVisitor { if (right->IsArrayLength()) { ValueBound lower = ValueBound::Min(); // ideally, lower should be '1-array_length'. 
ValueBound upper = ValueBound(right, -1); // array_length - 1 - ValueRange* right_range = new (GetGraph()->GetArena()) ValueRange( - GetGraph()->GetArena(), + ValueRange* right_range = new (GetGraph()->GetAllocator()) ValueRange( + GetGraph()->GetAllocator(), lower, upper); ValueRange* left_range = LookupValueRange(left, instruction->GetBlock()); @@ -1195,8 +1195,8 @@ class BCEVisitor : public HGraphVisitor { // which isn't available as an instruction yet. new_array will // be treated the same as new_array.length when it's used in a ValueBound. ValueBound upper = ValueBound(new_array, -right_const); - ValueRange* range = new (GetGraph()->GetArena()) - ValueRange(GetGraph()->GetArena(), lower, upper); + ValueRange* range = new (GetGraph()->GetAllocator()) + ValueRange(GetGraph()->GetAllocator(), lower, upper); ValueRange* existing_range = LookupValueRange(left, new_array->GetBlock()); if (existing_range != nullptr) { range = existing_range->Narrow(range); @@ -1260,14 +1260,15 @@ class BCEVisitor : public HGraphVisitor { if (base == nullptr) { DCHECK_GE(min_c, 0); } else { - HInstruction* lower = new (GetGraph()->GetArena()) + HInstruction* lower = new (GetGraph()->GetAllocator()) HAdd(DataType::Type::kInt32, base, GetGraph()->GetIntConstant(min_c)); - upper = new (GetGraph()->GetArena()) HAdd(DataType::Type::kInt32, base, upper); + upper = new (GetGraph()->GetAllocator()) HAdd(DataType::Type::kInt32, base, upper); block->InsertInstructionBefore(lower, bounds_check); block->InsertInstructionBefore(upper, bounds_check); - InsertDeoptInBlock(bounds_check, new (GetGraph()->GetArena()) HAbove(lower, upper)); + InsertDeoptInBlock(bounds_check, new (GetGraph()->GetAllocator()) HAbove(lower, upper)); } - InsertDeoptInBlock(bounds_check, new (GetGraph()->GetArena()) HAboveOrEqual(upper, array_length)); + InsertDeoptInBlock( + bounds_check, new (GetGraph()->GetAllocator()) HAboveOrEqual(upper, array_length)); // Flag that this kind of deoptimization has occurred. 
has_dom_based_dynamic_bce_ = true; } @@ -1291,9 +1292,9 @@ class BCEVisitor : public HGraphVisitor { int32_t min_c = base == nullptr ? 0 : value.GetConstant(); int32_t max_c = value.GetConstant(); ArenaVector<HBoundsCheck*> candidates( - GetGraph()->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)); + GetGraph()->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)); ArenaVector<HBoundsCheck*> standby( - GetGraph()->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)); + GetGraph()->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)); for (const HUseListNode<HInstruction*>& use : array_length->GetUses()) { // Another bounds check in same or dominated block? HInstruction* user = use.GetUser(); @@ -1377,7 +1378,7 @@ class BCEVisitor : public HGraphVisitor { v2.is_known && (v2.a_constant == 0 || v2.a_constant == 1)) { DCHECK(v1.a_constant == 1 || v1.instruction == nullptr); DCHECK(v2.a_constant == 1 || v2.instruction == nullptr); - ValueRange index_range(GetGraph()->GetArena(), + ValueRange index_range(GetGraph()->GetAllocator(), ValueBound(v1.instruction, v1.b_constant), ValueBound(v2.instruction, v2.b_constant)); // If analysis reveals a certain OOB, disable dynamic BCE. Otherwise, @@ -1410,9 +1411,9 @@ class BCEVisitor : public HGraphVisitor { int32_t min_c = base == nullptr ? 
0 : value.GetConstant(); int32_t max_c = value.GetConstant(); ArenaVector<HBoundsCheck*> candidates( - GetGraph()->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)); + GetGraph()->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)); ArenaVector<HBoundsCheck*> standby( - GetGraph()->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)); + GetGraph()->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)); for (const HUseListNode<HInstruction*>& use : array_length->GetUses()) { HInstruction* user = use.GetUser(); if (user->IsBoundsCheck() && loop == user->GetBlock()->GetLoopInformation()) { @@ -1498,7 +1499,8 @@ class BCEVisitor : public HGraphVisitor { if (min_c != max_c) { DCHECK(min_lower == nullptr && min_upper != nullptr && max_lower == nullptr && max_upper != nullptr); - InsertDeoptInLoop(loop, block, new (GetGraph()->GetArena()) HAbove(min_upper, max_upper)); + InsertDeoptInLoop( + loop, block, new (GetGraph()->GetAllocator()) HAbove(min_upper, max_upper)); } else { DCHECK(min_lower == nullptr && min_upper == nullptr && max_lower == nullptr && max_upper != nullptr); @@ -1508,15 +1510,17 @@ class BCEVisitor : public HGraphVisitor { if (min_c != max_c) { DCHECK(min_lower != nullptr && min_upper != nullptr && max_lower != nullptr && max_upper != nullptr); - InsertDeoptInLoop(loop, block, new (GetGraph()->GetArena()) HAbove(min_lower, max_lower)); + InsertDeoptInLoop( + loop, block, new (GetGraph()->GetAllocator()) HAbove(min_lower, max_lower)); } else { DCHECK(min_lower == nullptr && min_upper == nullptr && max_lower != nullptr && max_upper != nullptr); } - InsertDeoptInLoop(loop, block, new (GetGraph()->GetArena()) HAbove(max_lower, max_upper)); + InsertDeoptInLoop( + loop, block, new (GetGraph()->GetAllocator()) HAbove(max_lower, max_upper)); } InsertDeoptInLoop( - loop, block, new (GetGraph()->GetArena()) HAboveOrEqual(max_upper, array_length)); + loop, block, new (GetGraph()->GetAllocator()) HAboveOrEqual(max_upper, array_length)); } else 
{ // TODO: if rejected, avoid doing this again for subsequent instructions in this set? } @@ -1610,7 +1614,7 @@ class BCEVisitor : public HGraphVisitor { TransformLoopForDeoptimizationIfNeeded(loop, needs_taken_test); HBasicBlock* block = GetPreHeader(loop, check); HInstruction* cond = - new (GetGraph()->GetArena()) HEqual(array, GetGraph()->GetNullConstant()); + new (GetGraph()->GetAllocator()) HEqual(array, GetGraph()->GetNullConstant()); InsertDeoptInLoop(loop, block, cond, /* is_null_check */ true); ReplaceInstruction(check, array); return true; @@ -1685,8 +1689,8 @@ class BCEVisitor : public HGraphVisitor { block->InsertInstructionBefore(condition, block->GetLastInstruction()); DeoptimizationKind kind = is_null_check ? DeoptimizationKind::kLoopNullBCE : DeoptimizationKind::kLoopBoundsBCE; - HDeoptimize* deoptimize = new (GetGraph()->GetArena()) HDeoptimize( - GetGraph()->GetArena(), condition, kind, suspend->GetDexPc()); + HDeoptimize* deoptimize = new (GetGraph()->GetAllocator()) HDeoptimize( + GetGraph()->GetAllocator(), condition, kind, suspend->GetDexPc()); block->InsertInstructionBefore(deoptimize, block->GetLastInstruction()); if (suspend->HasEnvironment()) { deoptimize->CopyEnvironmentFromWithLoopPhiAdjustment( @@ -1698,8 +1702,11 @@ class BCEVisitor : public HGraphVisitor { void InsertDeoptInBlock(HBoundsCheck* bounds_check, HInstruction* condition) { HBasicBlock* block = bounds_check->GetBlock(); block->InsertInstructionBefore(condition, bounds_check); - HDeoptimize* deoptimize = new (GetGraph()->GetArena()) HDeoptimize( - GetGraph()->GetArena(), condition, DeoptimizationKind::kBlockBCE, bounds_check->GetDexPc()); + HDeoptimize* deoptimize = new (GetGraph()->GetAllocator()) HDeoptimize( + GetGraph()->GetAllocator(), + condition, + DeoptimizationKind::kBlockBCE, + bounds_check->GetDexPc()); block->InsertInstructionBefore(deoptimize, bounds_check); deoptimize->CopyEnvironmentFrom(bounds_check->GetEnvironment()); } @@ -1763,18 +1770,18 @@ class 
BCEVisitor : public HGraphVisitor { HBasicBlock* false_block = if_block->GetSuccessors()[1]; // False successor. // Goto instructions. - true_block->AddInstruction(new (GetGraph()->GetArena()) HGoto()); - false_block->AddInstruction(new (GetGraph()->GetArena()) HGoto()); - new_preheader->AddInstruction(new (GetGraph()->GetArena()) HGoto()); + true_block->AddInstruction(new (GetGraph()->GetAllocator()) HGoto()); + false_block->AddInstruction(new (GetGraph()->GetAllocator()) HGoto()); + new_preheader->AddInstruction(new (GetGraph()->GetAllocator()) HGoto()); // Insert the taken-test to see if the loop body is entered. If the // loop isn't entered at all, it jumps around the deoptimization block. - if_block->AddInstruction(new (GetGraph()->GetArena()) HGoto()); // placeholder + if_block->AddInstruction(new (GetGraph()->GetAllocator()) HGoto()); // placeholder HInstruction* condition = induction_range_.GenerateTakenTest( header->GetLastInstruction(), GetGraph(), if_block); DCHECK(condition != nullptr); if_block->RemoveInstruction(if_block->GetLastInstruction()); - if_block->AddInstruction(new (GetGraph()->GetArena()) HIf(condition)); + if_block->AddInstruction(new (GetGraph()->GetAllocator()) HIf(condition)); taken_test_loop_.Put(loop_id, true_block); } @@ -1853,8 +1860,8 @@ class BCEVisitor : public HGraphVisitor { case DataType::Type::kFloat64: zero = graph->GetDoubleConstant(0); break; default: zero = graph->GetConstant(type, 0); break; } - HPhi* phi = new (graph->GetArena()) - HPhi(graph->GetArena(), kNoRegNumber, /*number_of_inputs*/ 2, HPhi::ToPhiType(type)); + HPhi* phi = new (graph->GetAllocator()) + HPhi(graph->GetAllocator(), kNoRegNumber, /*number_of_inputs*/ 2, HPhi::ToPhiType(type)); phi->SetRawInputAt(0, instruction); phi->SetRawInputAt(1, zero); if (type == DataType::Type::kReference) { diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc index 851838c4b8..1523478613 100644 --- 
a/compiler/optimizing/bounds_check_elimination_test.cc +++ b/compiler/optimizing/bounds_check_elimination_test.cc @@ -32,10 +32,9 @@ namespace art { /** * Fixture class for the BoundsCheckElimination tests. */ -class BoundsCheckEliminationTest : public testing::Test { +class BoundsCheckEliminationTest : public OptimizingUnitTest { public: - BoundsCheckEliminationTest() : pool_(), allocator_(&pool_) { - graph_ = CreateGraph(&allocator_); + BoundsCheckEliminationTest() : graph_(CreateGraph()) { graph_->SetHasBoundsChecks(true); } @@ -57,8 +56,6 @@ class BoundsCheckEliminationTest : public testing::Test { BoundsCheckElimination(graph_, side_effects, &induction).Run(); } - ArenaPool pool_; - ArenaAllocator allocator_; HGraph* graph_; }; @@ -67,12 +64,12 @@ class BoundsCheckEliminationTest : public testing::Test { // else if (i >= array.length) { array[i] = 1; // Can't eliminate. } // else { array[i] = 1; // Can eliminate. } TEST_F(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) { - HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(entry); graph_->SetEntryBlock(entry); - HInstruction* parameter1 = new (&allocator_) HParameterValue( + HInstruction* parameter1 = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); // array - HInstruction* parameter2 = new (&allocator_) HParameterValue( + HInstruction* parameter2 = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); // i entry->AddInstruction(parameter1); entry->AddInstruction(parameter2); @@ -80,70 +77,70 @@ TEST_F(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) { HInstruction* constant_1 = graph_->GetIntConstant(1); HInstruction* constant_0 = graph_->GetIntConstant(0); - HBasicBlock* block1 = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* block1 = new (GetAllocator()) 
HBasicBlock(graph_); graph_->AddBlock(block1); - HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(parameter2, constant_0); - HIf* if_inst = new (&allocator_) HIf(cmp); + HInstruction* cmp = new (GetAllocator()) HGreaterThanOrEqual(parameter2, constant_0); + HIf* if_inst = new (GetAllocator()) HIf(cmp); block1->AddInstruction(cmp); block1->AddInstruction(if_inst); entry->AddSuccessor(block1); - HBasicBlock* block2 = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* block2 = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(block2); - HNullCheck* null_check = new (&allocator_) HNullCheck(parameter1, 0); - HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0); - HBoundsCheck* bounds_check2 = new (&allocator_) + HNullCheck* null_check = new (GetAllocator()) HNullCheck(parameter1, 0); + HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HBoundsCheck* bounds_check2 = new (GetAllocator()) HBoundsCheck(parameter2, array_length, 0); - HArraySet* array_set = new (&allocator_) HArraySet( + HArraySet* array_set = new (GetAllocator()) HArraySet( null_check, bounds_check2, constant_1, DataType::Type::kInt32, 0); block2->AddInstruction(null_check); block2->AddInstruction(array_length); block2->AddInstruction(bounds_check2); block2->AddInstruction(array_set); - HBasicBlock* block3 = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* block3 = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(block3); - null_check = new (&allocator_) HNullCheck(parameter1, 0); - array_length = new (&allocator_) HArrayLength(null_check, 0); - cmp = new (&allocator_) HLessThan(parameter2, array_length); - if_inst = new (&allocator_) HIf(cmp); + null_check = new (GetAllocator()) HNullCheck(parameter1, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + cmp = new (GetAllocator()) HLessThan(parameter2, array_length); + if_inst = new (GetAllocator()) HIf(cmp); block3->AddInstruction(null_check); 
block3->AddInstruction(array_length); block3->AddInstruction(cmp); block3->AddInstruction(if_inst); - HBasicBlock* block4 = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* block4 = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(block4); - null_check = new (&allocator_) HNullCheck(parameter1, 0); - array_length = new (&allocator_) HArrayLength(null_check, 0); - HBoundsCheck* bounds_check4 = new (&allocator_) + null_check = new (GetAllocator()) HNullCheck(parameter1, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HBoundsCheck* bounds_check4 = new (GetAllocator()) HBoundsCheck(parameter2, array_length, 0); - array_set = new (&allocator_) HArraySet( + array_set = new (GetAllocator()) HArraySet( null_check, bounds_check4, constant_1, DataType::Type::kInt32, 0); block4->AddInstruction(null_check); block4->AddInstruction(array_length); block4->AddInstruction(bounds_check4); block4->AddInstruction(array_set); - HBasicBlock* block5 = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* block5 = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(block5); - null_check = new (&allocator_) HNullCheck(parameter1, 0); - array_length = new (&allocator_) HArrayLength(null_check, 0); - HBoundsCheck* bounds_check5 = new (&allocator_) + null_check = new (GetAllocator()) HNullCheck(parameter1, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HBoundsCheck* bounds_check5 = new (GetAllocator()) HBoundsCheck(parameter2, array_length, 0); - array_set = new (&allocator_) HArraySet( + array_set = new (GetAllocator()) HArraySet( null_check, bounds_check5, constant_1, DataType::Type::kInt32, 0); block5->AddInstruction(null_check); block5->AddInstruction(array_length); block5->AddInstruction(bounds_check5); block5->AddInstruction(array_set); - HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(exit); block2->AddSuccessor(exit); 
block4->AddSuccessor(exit); block5->AddSuccessor(exit); - exit->AddInstruction(new (&allocator_) HExit()); + exit->AddInstruction(new (GetAllocator()) HExit()); block1->AddSuccessor(block3); // True successor block1->AddSuccessor(block2); // False successor @@ -164,12 +161,12 @@ TEST_F(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) { // if (j < array.length) array[j] = 1; // Can't eliminate. // } TEST_F(BoundsCheckEliminationTest, OverflowArrayBoundsElimination) { - HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(entry); graph_->SetEntryBlock(entry); - HInstruction* parameter1 = new (&allocator_) HParameterValue( + HInstruction* parameter1 = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); // array - HInstruction* parameter2 = new (&allocator_) HParameterValue( + HInstruction* parameter2 = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); // i entry->AddInstruction(parameter1); entry->AddInstruction(parameter2); @@ -178,39 +175,40 @@ TEST_F(BoundsCheckEliminationTest, OverflowArrayBoundsElimination) { HInstruction* constant_0 = graph_->GetIntConstant(0); HInstruction* constant_max_int = graph_->GetIntConstant(INT_MAX); - HBasicBlock* block1 = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* block1 = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(block1); - HInstruction* cmp = new (&allocator_) HLessThanOrEqual(parameter2, constant_0); - HIf* if_inst = new (&allocator_) HIf(cmp); + HInstruction* cmp = new (GetAllocator()) HLessThanOrEqual(parameter2, constant_0); + HIf* if_inst = new (GetAllocator()) HIf(cmp); block1->AddInstruction(cmp); block1->AddInstruction(if_inst); entry->AddSuccessor(block1); - HBasicBlock* block2 = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* block2 = new (GetAllocator()) 
HBasicBlock(graph_); graph_->AddBlock(block2); - HInstruction* add = new (&allocator_) HAdd(DataType::Type::kInt32, parameter2, constant_max_int); - HNullCheck* null_check = new (&allocator_) HNullCheck(parameter1, 0); - HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0); - HInstruction* cmp2 = new (&allocator_) HGreaterThanOrEqual(add, array_length); - if_inst = new (&allocator_) HIf(cmp2); + HInstruction* add = + new (GetAllocator()) HAdd(DataType::Type::kInt32, parameter2, constant_max_int); + HNullCheck* null_check = new (GetAllocator()) HNullCheck(parameter1, 0); + HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HInstruction* cmp2 = new (GetAllocator()) HGreaterThanOrEqual(add, array_length); + if_inst = new (GetAllocator()) HIf(cmp2); block2->AddInstruction(add); block2->AddInstruction(null_check); block2->AddInstruction(array_length); block2->AddInstruction(cmp2); block2->AddInstruction(if_inst); - HBasicBlock* block3 = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* block3 = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(block3); - HBoundsCheck* bounds_check = new (&allocator_) + HBoundsCheck* bounds_check = new (GetAllocator()) HBoundsCheck(add, array_length, 0); - HArraySet* array_set = new (&allocator_) HArraySet( + HArraySet* array_set = new (GetAllocator()) HArraySet( null_check, bounds_check, constant_1, DataType::Type::kInt32, 0); block3->AddInstruction(bounds_check); block3->AddInstruction(array_set); - HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(exit); - exit->AddInstruction(new (&allocator_) HExit()); + exit->AddInstruction(new (GetAllocator()) HExit()); block1->AddSuccessor(exit); // true successor block1->AddSuccessor(block2); // false successor block2->AddSuccessor(exit); // true successor @@ -228,12 +226,12 @@ TEST_F(BoundsCheckEliminationTest, OverflowArrayBoundsElimination) 
{ // if (j > 0) array[j] = 1; // Can't eliminate. // } TEST_F(BoundsCheckEliminationTest, UnderflowArrayBoundsElimination) { - HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(entry); graph_->SetEntryBlock(entry); - HInstruction* parameter1 = new (&allocator_) HParameterValue( + HInstruction* parameter1 = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); // array - HInstruction* parameter2 = new (&allocator_) HParameterValue( + HInstruction* parameter2 = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); // i entry->AddInstruction(parameter1); entry->AddInstruction(parameter2); @@ -242,41 +240,42 @@ TEST_F(BoundsCheckEliminationTest, UnderflowArrayBoundsElimination) { HInstruction* constant_0 = graph_->GetIntConstant(0); HInstruction* constant_max_int = graph_->GetIntConstant(INT_MAX); - HBasicBlock* block1 = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* block1 = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(block1); - HNullCheck* null_check = new (&allocator_) HNullCheck(parameter1, 0); - HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0); - HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(parameter2, array_length); - HIf* if_inst = new (&allocator_) HIf(cmp); + HNullCheck* null_check = new (GetAllocator()) HNullCheck(parameter1, 0); + HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HInstruction* cmp = new (GetAllocator()) HGreaterThanOrEqual(parameter2, array_length); + HIf* if_inst = new (GetAllocator()) HIf(cmp); block1->AddInstruction(null_check); block1->AddInstruction(array_length); block1->AddInstruction(cmp); block1->AddInstruction(if_inst); entry->AddSuccessor(block1); - HBasicBlock* block2 = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* block2 = new 
(GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(block2); - HInstruction* sub1 = new (&allocator_) HSub(DataType::Type::kInt32, parameter2, constant_max_int); - HInstruction* sub2 = new (&allocator_) HSub(DataType::Type::kInt32, sub1, constant_max_int); - HInstruction* cmp2 = new (&allocator_) HLessThanOrEqual(sub2, constant_0); - if_inst = new (&allocator_) HIf(cmp2); + HInstruction* sub1 = + new (GetAllocator()) HSub(DataType::Type::kInt32, parameter2, constant_max_int); + HInstruction* sub2 = new (GetAllocator()) HSub(DataType::Type::kInt32, sub1, constant_max_int); + HInstruction* cmp2 = new (GetAllocator()) HLessThanOrEqual(sub2, constant_0); + if_inst = new (GetAllocator()) HIf(cmp2); block2->AddInstruction(sub1); block2->AddInstruction(sub2); block2->AddInstruction(cmp2); block2->AddInstruction(if_inst); - HBasicBlock* block3 = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* block3 = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(block3); - HBoundsCheck* bounds_check = new (&allocator_) + HBoundsCheck* bounds_check = new (GetAllocator()) HBoundsCheck(sub2, array_length, 0); - HArraySet* array_set = new (&allocator_) HArraySet( + HArraySet* array_set = new (GetAllocator()) HArraySet( null_check, bounds_check, constant_1, DataType::Type::kInt32, 0); block3->AddInstruction(bounds_check); block3->AddInstruction(array_set); - HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(exit); - exit->AddInstruction(new (&allocator_) HExit()); + exit->AddInstruction(new (GetAllocator()) HExit()); block1->AddSuccessor(exit); // true successor block1->AddSuccessor(block2); // false successor block2->AddSuccessor(exit); // true successor @@ -292,10 +291,10 @@ TEST_F(BoundsCheckEliminationTest, UnderflowArrayBoundsElimination) { // array[5] = 1; // Can eliminate. // array[4] = 1; // Can eliminate. 
TEST_F(BoundsCheckEliminationTest, ConstantArrayBoundsElimination) { - HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(entry); graph_->SetEntryBlock(entry); - HInstruction* parameter = new (&allocator_) HParameterValue( + HInstruction* parameter = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); entry->AddInstruction(parameter); @@ -304,49 +303,49 @@ TEST_F(BoundsCheckEliminationTest, ConstantArrayBoundsElimination) { HInstruction* constant_6 = graph_->GetIntConstant(6); HInstruction* constant_1 = graph_->GetIntConstant(1); - HBasicBlock* block = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(block); entry->AddSuccessor(block); - HNullCheck* null_check = new (&allocator_) HNullCheck(parameter, 0); - HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0); - HBoundsCheck* bounds_check6 = new (&allocator_) + HNullCheck* null_check = new (GetAllocator()) HNullCheck(parameter, 0); + HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HBoundsCheck* bounds_check6 = new (GetAllocator()) HBoundsCheck(constant_6, array_length, 0); - HInstruction* array_set = new (&allocator_) HArraySet( + HInstruction* array_set = new (GetAllocator()) HArraySet( null_check, bounds_check6, constant_1, DataType::Type::kInt32, 0); block->AddInstruction(null_check); block->AddInstruction(array_length); block->AddInstruction(bounds_check6); block->AddInstruction(array_set); - null_check = new (&allocator_) HNullCheck(parameter, 0); - array_length = new (&allocator_) HArrayLength(null_check, 0); - HBoundsCheck* bounds_check5 = new (&allocator_) + null_check = new (GetAllocator()) HNullCheck(parameter, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HBoundsCheck* bounds_check5 = new (GetAllocator()) 
HBoundsCheck(constant_5, array_length, 0); - array_set = new (&allocator_) HArraySet( + array_set = new (GetAllocator()) HArraySet( null_check, bounds_check5, constant_1, DataType::Type::kInt32, 0); block->AddInstruction(null_check); block->AddInstruction(array_length); block->AddInstruction(bounds_check5); block->AddInstruction(array_set); - null_check = new (&allocator_) HNullCheck(parameter, 0); - array_length = new (&allocator_) HArrayLength(null_check, 0); - HBoundsCheck* bounds_check4 = new (&allocator_) + null_check = new (GetAllocator()) HNullCheck(parameter, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HBoundsCheck* bounds_check4 = new (GetAllocator()) HBoundsCheck(constant_4, array_length, 0); - array_set = new (&allocator_) HArraySet( + array_set = new (GetAllocator()) HArraySet( null_check, bounds_check4, constant_1, DataType::Type::kInt32, 0); block->AddInstruction(null_check); block->AddInstruction(array_length); block->AddInstruction(bounds_check4); block->AddInstruction(array_set); - block->AddInstruction(new (&allocator_) HGoto()); + block->AddInstruction(new (GetAllocator()) HGoto()); - HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(exit); block->AddSuccessor(exit); - exit->AddInstruction(new (&allocator_) HExit()); + exit->AddInstruction(new (GetAllocator()) HExit()); RunBCE(); @@ -429,28 +428,28 @@ static HInstruction* BuildSSAGraph1(HGraph* graph, TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1a) { // for (int i=0; i<array.length; i++) { array[i] = 10; // Can eliminate with gvn. 
} - HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 0, 1); + HInstruction* bounds_check = BuildSSAGraph1(graph_, GetAllocator(), 0, 1); RunBCE(); ASSERT_TRUE(IsRemoved(bounds_check)); } TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1b) { // for (int i=1; i<array.length; i++) { array[i] = 10; // Can eliminate. } - HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 1, 1); + HInstruction* bounds_check = BuildSSAGraph1(graph_, GetAllocator(), 1, 1); RunBCE(); ASSERT_TRUE(IsRemoved(bounds_check)); } TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1c) { // for (int i=-1; i<array.length; i++) { array[i] = 10; // Can't eliminate. } - HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, -1, 1); + HInstruction* bounds_check = BuildSSAGraph1(graph_, GetAllocator(), -1, 1); RunBCE(); ASSERT_FALSE(IsRemoved(bounds_check)); } TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1d) { // for (int i=0; i<=array.length; i++) { array[i] = 10; // Can't eliminate. } - HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 0, 1, kCondGT); + HInstruction* bounds_check = BuildSSAGraph1(graph_, GetAllocator(), 0, 1, kCondGT); RunBCE(); ASSERT_FALSE(IsRemoved(bounds_check)); } @@ -458,14 +457,14 @@ TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1d) { TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1e) { // for (int i=0; i<array.length; i += 2) { // array[i] = 10; // Can't eliminate due to overflow concern. } - HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 0, 2); + HInstruction* bounds_check = BuildSSAGraph1(graph_, GetAllocator(), 0, 2); RunBCE(); ASSERT_FALSE(IsRemoved(bounds_check)); } TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1f) { // for (int i=1; i<array.length; i += 2) { array[i] = 10; // Can eliminate. 
} - HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 1, 2); + HInstruction* bounds_check = BuildSSAGraph1(graph_, GetAllocator(), 1, 2); RunBCE(); ASSERT_TRUE(IsRemoved(bounds_check)); } @@ -546,35 +545,35 @@ static HInstruction* BuildSSAGraph2(HGraph *graph, TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2a) { // for (int i=array.length; i>0; i--) { array[i-1] = 10; // Can eliminate with gvn. } - HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, 0); + HInstruction* bounds_check = BuildSSAGraph2(graph_, GetAllocator(), 0); RunBCE(); ASSERT_TRUE(IsRemoved(bounds_check)); } TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2b) { // for (int i=array.length; i>1; i--) { array[i-1] = 10; // Can eliminate. } - HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, 1); + HInstruction* bounds_check = BuildSSAGraph2(graph_, GetAllocator(), 1); RunBCE(); ASSERT_TRUE(IsRemoved(bounds_check)); } TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2c) { // for (int i=array.length; i>-1; i--) { array[i-1] = 10; // Can't eliminate. } - HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, -1); + HInstruction* bounds_check = BuildSSAGraph2(graph_, GetAllocator(), -1); RunBCE(); ASSERT_FALSE(IsRemoved(bounds_check)); } TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2d) { // for (int i=array.length; i>=0; i--) { array[i-1] = 10; // Can't eliminate. } - HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, 0, -1, kCondLT); + HInstruction* bounds_check = BuildSSAGraph2(graph_, GetAllocator(), 0, -1, kCondLT); RunBCE(); ASSERT_FALSE(IsRemoved(bounds_check)); } TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2e) { // for (int i=array.length; i>0; i-=2) { array[i-1] = 10; // Can eliminate. 
} - HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, 0, -2); + HInstruction* bounds_check = BuildSSAGraph2(graph_, GetAllocator(), 0, -2); RunBCE(); ASSERT_TRUE(IsRemoved(bounds_check)); } @@ -653,7 +652,7 @@ static HInstruction* BuildSSAGraph3(HGraph* graph, TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3a) { // int[] array = new int[10]; // for (int i=0; i<10; i++) { array[i] = 10; // Can eliminate. } - HInstruction* bounds_check = BuildSSAGraph3(graph_, &allocator_, 0, 1, kCondGE); + HInstruction* bounds_check = BuildSSAGraph3(graph_, GetAllocator(), 0, 1, kCondGE); RunBCE(); ASSERT_TRUE(IsRemoved(bounds_check)); } @@ -661,7 +660,7 @@ TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3a) { TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3b) { // int[] array = new int[10]; // for (int i=1; i<10; i++) { array[i] = 10; // Can eliminate. } - HInstruction* bounds_check = BuildSSAGraph3(graph_, &allocator_, 1, 1, kCondGE); + HInstruction* bounds_check = BuildSSAGraph3(graph_, GetAllocator(), 1, 1, kCondGE); RunBCE(); ASSERT_TRUE(IsRemoved(bounds_check)); } @@ -669,7 +668,7 @@ TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3b) { TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3c) { // int[] array = new int[10]; // for (int i=0; i<=10; i++) { array[i] = 10; // Can't eliminate. } - HInstruction* bounds_check = BuildSSAGraph3(graph_, &allocator_, 0, 1, kCondGT); + HInstruction* bounds_check = BuildSSAGraph3(graph_, GetAllocator(), 0, 1, kCondGT); RunBCE(); ASSERT_FALSE(IsRemoved(bounds_check)); } @@ -677,7 +676,7 @@ TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3c) { TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3d) { // int[] array = new int[10]; // for (int i=1; i<10; i+=8) { array[i] = 10; // Can eliminate. 
} - HInstruction* bounds_check = BuildSSAGraph3(graph_, &allocator_, 1, 8, kCondGE); + HInstruction* bounds_check = BuildSSAGraph3(graph_, GetAllocator(), 1, 8, kCondGE); RunBCE(); ASSERT_TRUE(IsRemoved(bounds_check)); } @@ -759,21 +758,21 @@ static HInstruction* BuildSSAGraph4(HGraph* graph, TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination4a) { // for (int i=0; i<array.length; i++) { array[array.length-i-1] = 10; // Can eliminate with gvn. } - HInstruction* bounds_check = BuildSSAGraph4(graph_, &allocator_, 0); + HInstruction* bounds_check = BuildSSAGraph4(graph_, GetAllocator(), 0); RunBCE(); ASSERT_TRUE(IsRemoved(bounds_check)); } TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination4b) { // for (int i=1; i<array.length; i++) { array[array.length-i-1] = 10; // Can eliminate. } - HInstruction* bounds_check = BuildSSAGraph4(graph_, &allocator_, 1); + HInstruction* bounds_check = BuildSSAGraph4(graph_, GetAllocator(), 1); RunBCE(); ASSERT_TRUE(IsRemoved(bounds_check)); } TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination4c) { // for (int i=0; i<=array.length; i++) { array[array.length-i] = 10; // Can't eliminate. 
} - HInstruction* bounds_check = BuildSSAGraph4(graph_, &allocator_, 0, kCondGT); + HInstruction* bounds_check = BuildSSAGraph4(graph_, GetAllocator(), 0, kCondGT); RunBCE(); ASSERT_FALSE(IsRemoved(bounds_check)); } @@ -790,10 +789,10 @@ TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination4c) { // } // } TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) { - HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(entry); graph_->SetEntryBlock(entry); - HInstruction* parameter = new (&allocator_) HParameterValue( + HInstruction* parameter = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); entry->AddInstruction(parameter); @@ -801,23 +800,23 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) { HInstruction* constant_minus_1 = graph_->GetIntConstant(-1); HInstruction* constant_1 = graph_->GetIntConstant(1); - HBasicBlock* block = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(block); entry->AddSuccessor(block); - block->AddInstruction(new (&allocator_) HGoto()); + block->AddInstruction(new (GetAllocator()) HGoto()); - HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(exit); - exit->AddInstruction(new (&allocator_) HExit()); + exit->AddInstruction(new (GetAllocator()) HExit()); - HBasicBlock* outer_header = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* outer_header = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(outer_header); - HPhi* phi_i = new (&allocator_) HPhi(&allocator_, 0, 0, DataType::Type::kInt32); - HNullCheck* null_check = new (&allocator_) HNullCheck(parameter, 0); - HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0); - HAdd* add = new (&allocator_) 
HAdd(DataType::Type::kInt32, array_length, constant_minus_1); - HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(phi_i, add); - HIf* if_inst = new (&allocator_) HIf(cmp); + HPhi* phi_i = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32); + HNullCheck* null_check = new (GetAllocator()) HNullCheck(parameter, 0); + HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HAdd* add = new (GetAllocator()) HAdd(DataType::Type::kInt32, array_length, constant_minus_1); + HInstruction* cmp = new (GetAllocator()) HGreaterThanOrEqual(phi_i, add); + HIf* if_inst = new (GetAllocator()) HIf(cmp); outer_header->AddPhi(phi_i); outer_header->AddInstruction(null_check); outer_header->AddInstruction(array_length); @@ -826,15 +825,15 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) { outer_header->AddInstruction(if_inst); phi_i->AddInput(constant_0); - HBasicBlock* inner_header = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* inner_header = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(inner_header); - HPhi* phi_j = new (&allocator_) HPhi(&allocator_, 0, 0, DataType::Type::kInt32); - null_check = new (&allocator_) HNullCheck(parameter, 0); - array_length = new (&allocator_) HArrayLength(null_check, 0); - HSub* sub = new (&allocator_) HSub(DataType::Type::kInt32, array_length, phi_i); - add = new (&allocator_) HAdd(DataType::Type::kInt32, sub, constant_minus_1); - cmp = new (&allocator_) HGreaterThanOrEqual(phi_j, add); - if_inst = new (&allocator_) HIf(cmp); + HPhi* phi_j = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32); + null_check = new (GetAllocator()) HNullCheck(parameter, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HSub* sub = new (GetAllocator()) HSub(DataType::Type::kInt32, array_length, phi_i); + add = new (GetAllocator()) HAdd(DataType::Type::kInt32, sub, constant_minus_1); + cmp = new (GetAllocator()) 
HGreaterThanOrEqual(phi_j, add); + if_inst = new (GetAllocator()) HIf(cmp); inner_header->AddPhi(phi_j); inner_header->AddInstruction(null_check); inner_header->AddInstruction(array_length); @@ -844,25 +843,25 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) { inner_header->AddInstruction(if_inst); phi_j->AddInput(constant_0); - HBasicBlock* inner_body_compare = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* inner_body_compare = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(inner_body_compare); - null_check = new (&allocator_) HNullCheck(parameter, 0); - array_length = new (&allocator_) HArrayLength(null_check, 0); - HBoundsCheck* bounds_check1 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0); - HArrayGet* array_get_j = new (&allocator_) + null_check = new (GetAllocator()) HNullCheck(parameter, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HBoundsCheck* bounds_check1 = new (GetAllocator()) HBoundsCheck(phi_j, array_length, 0); + HArrayGet* array_get_j = new (GetAllocator()) HArrayGet(null_check, bounds_check1, DataType::Type::kInt32, 0); inner_body_compare->AddInstruction(null_check); inner_body_compare->AddInstruction(array_length); inner_body_compare->AddInstruction(bounds_check1); inner_body_compare->AddInstruction(array_get_j); - HInstruction* j_plus_1 = new (&allocator_) HAdd(DataType::Type::kInt32, phi_j, constant_1); - null_check = new (&allocator_) HNullCheck(parameter, 0); - array_length = new (&allocator_) HArrayLength(null_check, 0); - HBoundsCheck* bounds_check2 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0); - HArrayGet* array_get_j_plus_1 = new (&allocator_) + HInstruction* j_plus_1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi_j, constant_1); + null_check = new (GetAllocator()) HNullCheck(parameter, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HBoundsCheck* bounds_check2 = new (GetAllocator()) HBoundsCheck(j_plus_1, 
array_length, 0); + HArrayGet* array_get_j_plus_1 = new (GetAllocator()) HArrayGet(null_check, bounds_check2, DataType::Type::kInt32, 0); - cmp = new (&allocator_) HGreaterThanOrEqual(array_get_j, array_get_j_plus_1); - if_inst = new (&allocator_) HIf(cmp); + cmp = new (GetAllocator()) HGreaterThanOrEqual(array_get_j, array_get_j_plus_1); + if_inst = new (GetAllocator()) HIf(cmp); inner_body_compare->AddInstruction(j_plus_1); inner_body_compare->AddInstruction(null_check); inner_body_compare->AddInstruction(array_length); @@ -871,14 +870,14 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) { inner_body_compare->AddInstruction(cmp); inner_body_compare->AddInstruction(if_inst); - HBasicBlock* inner_body_swap = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* inner_body_swap = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(inner_body_swap); - j_plus_1 = new (&allocator_) HAdd(DataType::Type::kInt32, phi_j, constant_1); + j_plus_1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi_j, constant_1); // temp = array[j+1] - null_check = new (&allocator_) HNullCheck(parameter, 0); - array_length = new (&allocator_) HArrayLength(null_check, 0); - HInstruction* bounds_check3 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0); - array_get_j_plus_1 = new (&allocator_) + null_check = new (GetAllocator()) HNullCheck(parameter, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HInstruction* bounds_check3 = new (GetAllocator()) HBoundsCheck(j_plus_1, array_length, 0); + array_get_j_plus_1 = new (GetAllocator()) HArrayGet(null_check, bounds_check3, DataType::Type::kInt32, 0); inner_body_swap->AddInstruction(j_plus_1); inner_body_swap->AddInstruction(null_check); @@ -886,48 +885,48 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) { inner_body_swap->AddInstruction(bounds_check3); inner_body_swap->AddInstruction(array_get_j_plus_1); // array[j+1] = array[j] - null_check = new (&allocator_) 
HNullCheck(parameter, 0); - array_length = new (&allocator_) HArrayLength(null_check, 0); - HInstruction* bounds_check4 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0); - array_get_j = new (&allocator_) + null_check = new (GetAllocator()) HNullCheck(parameter, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HInstruction* bounds_check4 = new (GetAllocator()) HBoundsCheck(phi_j, array_length, 0); + array_get_j = new (GetAllocator()) HArrayGet(null_check, bounds_check4, DataType::Type::kInt32, 0); inner_body_swap->AddInstruction(null_check); inner_body_swap->AddInstruction(array_length); inner_body_swap->AddInstruction(bounds_check4); inner_body_swap->AddInstruction(array_get_j); - null_check = new (&allocator_) HNullCheck(parameter, 0); - array_length = new (&allocator_) HArrayLength(null_check, 0); - HInstruction* bounds_check5 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0); - HArraySet* array_set_j_plus_1 = new (&allocator_) + null_check = new (GetAllocator()) HNullCheck(parameter, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HInstruction* bounds_check5 = new (GetAllocator()) HBoundsCheck(j_plus_1, array_length, 0); + HArraySet* array_set_j_plus_1 = new (GetAllocator()) HArraySet(null_check, bounds_check5, array_get_j, DataType::Type::kInt32, 0); inner_body_swap->AddInstruction(null_check); inner_body_swap->AddInstruction(array_length); inner_body_swap->AddInstruction(bounds_check5); inner_body_swap->AddInstruction(array_set_j_plus_1); // array[j] = temp - null_check = new (&allocator_) HNullCheck(parameter, 0); - array_length = new (&allocator_) HArrayLength(null_check, 0); - HInstruction* bounds_check6 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0); - HArraySet* array_set_j = new (&allocator_) + null_check = new (GetAllocator()) HNullCheck(parameter, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HInstruction* bounds_check6 = new (GetAllocator()) 
HBoundsCheck(phi_j, array_length, 0); + HArraySet* array_set_j = new (GetAllocator()) HArraySet(null_check, bounds_check6, array_get_j_plus_1, DataType::Type::kInt32, 0); inner_body_swap->AddInstruction(null_check); inner_body_swap->AddInstruction(array_length); inner_body_swap->AddInstruction(bounds_check6); inner_body_swap->AddInstruction(array_set_j); - inner_body_swap->AddInstruction(new (&allocator_) HGoto()); + inner_body_swap->AddInstruction(new (GetAllocator()) HGoto()); - HBasicBlock* inner_body_add = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* inner_body_add = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(inner_body_add); - add = new (&allocator_) HAdd(DataType::Type::kInt32, phi_j, constant_1); + add = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi_j, constant_1); inner_body_add->AddInstruction(add); - inner_body_add->AddInstruction(new (&allocator_) HGoto()); + inner_body_add->AddInstruction(new (GetAllocator()) HGoto()); phi_j->AddInput(add); - HBasicBlock* outer_body_add = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* outer_body_add = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(outer_body_add); - add = new (&allocator_) HAdd(DataType::Type::kInt32, phi_i, constant_1); + add = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi_i, constant_1); outer_body_add->AddInstruction(add); - outer_body_add->AddInstruction(new (&allocator_) HGoto()); + outer_body_add->AddInstruction(new (GetAllocator()) HGoto()); phi_i->AddInput(add); block->AddSuccessor(outer_header); @@ -961,10 +960,10 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) { // array[param_i%10] = 10; // Can't eliminate, when param_i < 0 // } TEST_F(BoundsCheckEliminationTest, ModArrayBoundsElimination) { - HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(entry); graph_->SetEntryBlock(entry); - HInstruction* param_i = new 
(&allocator_) + HInstruction* param_i = new (GetAllocator()) HParameterValue(graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); entry->AddInstruction(param_i); @@ -974,17 +973,17 @@ TEST_F(BoundsCheckEliminationTest, ModArrayBoundsElimination) { HInstruction* constant_200 = graph_->GetIntConstant(200); HInstruction* constant_minus_10 = graph_->GetIntConstant(-10); - HBasicBlock* block = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(block); entry->AddSuccessor(block); // We pass a bogus constant for the class to avoid mocking one. - HInstruction* new_array = new (&allocator_) HNewArray(constant_10, constant_10, 0); + HInstruction* new_array = new (GetAllocator()) HNewArray(constant_10, constant_10, 0); block->AddInstruction(new_array); - block->AddInstruction(new (&allocator_) HGoto()); + block->AddInstruction(new (GetAllocator()) HGoto()); - HBasicBlock* loop_header = new (&allocator_) HBasicBlock(graph_); - HBasicBlock* loop_body = new (&allocator_) HBasicBlock(graph_); - HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* loop_header = new (GetAllocator()) HBasicBlock(graph_); + HBasicBlock* loop_body = new (GetAllocator()) HBasicBlock(graph_); + HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(loop_header); graph_->AddBlock(loop_body); @@ -994,9 +993,9 @@ TEST_F(BoundsCheckEliminationTest, ModArrayBoundsElimination) { loop_header->AddSuccessor(loop_body); // false successor loop_body->AddSuccessor(loop_header); - HPhi* phi = new (&allocator_) HPhi(&allocator_, 0, 0, DataType::Type::kInt32); - HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(phi, constant_200); - HInstruction* if_inst = new (&allocator_) HIf(cmp); + HPhi* phi = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32); + HInstruction* cmp = new (GetAllocator()) HGreaterThanOrEqual(phi, constant_200); + HInstruction* if_inst = new 
(GetAllocator()) HIf(cmp); loop_header->AddPhi(phi); loop_header->AddInstruction(cmp); loop_header->AddInstruction(if_inst); @@ -1005,49 +1004,52 @@ TEST_F(BoundsCheckEliminationTest, ModArrayBoundsElimination) { ////////////////////////////////////////////////////////////////////////////////// // LOOP BODY: // array[i % 10] = 10; - HRem* i_mod_10 = new (&allocator_) HRem(DataType::Type::kInt32, phi, constant_10, 0); - HBoundsCheck* bounds_check_i_mod_10 = new (&allocator_) HBoundsCheck(i_mod_10, constant_10, 0); - HInstruction* array_set = new (&allocator_) HArraySet( + HRem* i_mod_10 = new (GetAllocator()) HRem(DataType::Type::kInt32, phi, constant_10, 0); + HBoundsCheck* bounds_check_i_mod_10 = new (GetAllocator()) HBoundsCheck(i_mod_10, constant_10, 0); + HInstruction* array_set = new (GetAllocator()) HArraySet( new_array, bounds_check_i_mod_10, constant_10, DataType::Type::kInt32, 0); loop_body->AddInstruction(i_mod_10); loop_body->AddInstruction(bounds_check_i_mod_10); loop_body->AddInstruction(array_set); // array[i % 1] = 10; - HRem* i_mod_1 = new (&allocator_) HRem(DataType::Type::kInt32, phi, constant_1, 0); - HBoundsCheck* bounds_check_i_mod_1 = new (&allocator_) HBoundsCheck(i_mod_1, constant_10, 0); - array_set = new (&allocator_) HArraySet( + HRem* i_mod_1 = new (GetAllocator()) HRem(DataType::Type::kInt32, phi, constant_1, 0); + HBoundsCheck* bounds_check_i_mod_1 = new (GetAllocator()) HBoundsCheck(i_mod_1, constant_10, 0); + array_set = new (GetAllocator()) HArraySet( new_array, bounds_check_i_mod_1, constant_10, DataType::Type::kInt32, 0); loop_body->AddInstruction(i_mod_1); loop_body->AddInstruction(bounds_check_i_mod_1); loop_body->AddInstruction(array_set); // array[i % 200] = 10; - HRem* i_mod_200 = new (&allocator_) HRem(DataType::Type::kInt32, phi, constant_1, 0); - HBoundsCheck* bounds_check_i_mod_200 = new (&allocator_) HBoundsCheck(i_mod_200, constant_10, 0); - array_set = new (&allocator_) HArraySet( + HRem* i_mod_200 = new 
(GetAllocator()) HRem(DataType::Type::kInt32, phi, constant_1, 0); + HBoundsCheck* bounds_check_i_mod_200 = new (GetAllocator()) HBoundsCheck( + i_mod_200, constant_10, 0); + array_set = new (GetAllocator()) HArraySet( new_array, bounds_check_i_mod_200, constant_10, DataType::Type::kInt32, 0); loop_body->AddInstruction(i_mod_200); loop_body->AddInstruction(bounds_check_i_mod_200); loop_body->AddInstruction(array_set); // array[i % -10] = 10; - HRem* i_mod_minus_10 = new (&allocator_) HRem(DataType::Type::kInt32, phi, constant_minus_10, 0); - HBoundsCheck* bounds_check_i_mod_minus_10 = new (&allocator_) HBoundsCheck( + HRem* i_mod_minus_10 = new (GetAllocator()) HRem( + DataType::Type::kInt32, phi, constant_minus_10, 0); + HBoundsCheck* bounds_check_i_mod_minus_10 = new (GetAllocator()) HBoundsCheck( i_mod_minus_10, constant_10, 0); - array_set = new (&allocator_) HArraySet( + array_set = new (GetAllocator()) HArraySet( new_array, bounds_check_i_mod_minus_10, constant_10, DataType::Type::kInt32, 0); loop_body->AddInstruction(i_mod_minus_10); loop_body->AddInstruction(bounds_check_i_mod_minus_10); loop_body->AddInstruction(array_set); // array[i%array.length] = 10; - HNullCheck* null_check = new (&allocator_) HNullCheck(new_array, 0); - HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0); - HRem* i_mod_array_length = new (&allocator_) HRem(DataType::Type::kInt32, phi, array_length, 0); - HBoundsCheck* bounds_check_i_mod_array_len = new (&allocator_) HBoundsCheck( + HNullCheck* null_check = new (GetAllocator()) HNullCheck(new_array, 0); + HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HRem* i_mod_array_length = new (GetAllocator()) HRem( + DataType::Type::kInt32, phi, array_length, 0); + HBoundsCheck* bounds_check_i_mod_array_len = new (GetAllocator()) HBoundsCheck( i_mod_array_length, array_length, 0); - array_set = new (&allocator_) HArraySet( + array_set = new (GetAllocator()) HArraySet( null_check, 
bounds_check_i_mod_array_len, constant_10, DataType::Type::kInt32, 0); loop_body->AddInstruction(null_check); loop_body->AddInstruction(array_length); @@ -1056,23 +1058,23 @@ TEST_F(BoundsCheckEliminationTest, ModArrayBoundsElimination) { loop_body->AddInstruction(array_set); // array[param_i % 10] = 10; - HRem* param_i_mod_10 = new (&allocator_) HRem(DataType::Type::kInt32, param_i, constant_10, 0); - HBoundsCheck* bounds_check_param_i_mod_10 = new (&allocator_) HBoundsCheck( + HRem* param_i_mod_10 = new (GetAllocator()) HRem(DataType::Type::kInt32, param_i, constant_10, 0); + HBoundsCheck* bounds_check_param_i_mod_10 = new (GetAllocator()) HBoundsCheck( param_i_mod_10, constant_10, 0); - array_set = new (&allocator_) HArraySet( + array_set = new (GetAllocator()) HArraySet( new_array, bounds_check_param_i_mod_10, constant_10, DataType::Type::kInt32, 0); loop_body->AddInstruction(param_i_mod_10); loop_body->AddInstruction(bounds_check_param_i_mod_10); loop_body->AddInstruction(array_set); // array[param_i%array.length] = 10; - null_check = new (&allocator_) HNullCheck(new_array, 0); - array_length = new (&allocator_) HArrayLength(null_check, 0); - HRem* param_i_mod_array_length = new (&allocator_) HRem( + null_check = new (GetAllocator()) HNullCheck(new_array, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HRem* param_i_mod_array_length = new (GetAllocator()) HRem( DataType::Type::kInt32, param_i, array_length, 0); - HBoundsCheck* bounds_check_param_i_mod_array_len = new (&allocator_) HBoundsCheck( + HBoundsCheck* bounds_check_param_i_mod_array_len = new (GetAllocator()) HBoundsCheck( param_i_mod_array_length, array_length, 0); - array_set = new (&allocator_) HArraySet( + array_set = new (GetAllocator()) HArraySet( null_check, bounds_check_param_i_mod_array_len, constant_10, DataType::Type::kInt32, 0); loop_body->AddInstruction(null_check); loop_body->AddInstruction(array_length); @@ -1081,13 +1083,13 @@ TEST_F(BoundsCheckEliminationTest, 
ModArrayBoundsElimination) { loop_body->AddInstruction(array_set); // i++; - HInstruction* add = new (&allocator_) HAdd(DataType::Type::kInt32, phi, constant_1); + HInstruction* add = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi, constant_1); loop_body->AddInstruction(add); - loop_body->AddInstruction(new (&allocator_) HGoto()); + loop_body->AddInstruction(new (GetAllocator()) HGoto()); phi->AddInput(add); ////////////////////////////////////////////////////////////////////////////////// - exit->AddInstruction(new (&allocator_) HExit()); + exit->AddInstruction(new (GetAllocator()) HExit()); RunBCE(); diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc index 0e708ed408..76350a6d55 100644 --- a/compiler/optimizing/builder.cc +++ b/compiler/optimizing/builder.cc @@ -32,13 +32,12 @@ namespace art { HGraphBuilder::HGraphBuilder(HGraph* graph, - DexCompilationUnit* dex_compilation_unit, - const DexCompilationUnit* const outer_compilation_unit, + const DexCompilationUnit* dex_compilation_unit, + const DexCompilationUnit* outer_compilation_unit, CompilerDriver* driver, CodeGenerator* code_generator, OptimizingCompilerStats* compiler_stats, const uint8_t* interpreter_metadata, - Handle<mirror::DexCache> dex_cache, VariableSizedHandleScope* handles) : graph_(graph), dex_file_(&graph->GetDexFile()), @@ -63,7 +62,7 @@ HGraphBuilder::HGraphBuilder(HGraph* graph, code_generator, interpreter_metadata, compiler_stats, - dex_cache, + dex_compilation_unit->GetDexCache(), handles) {} bool HGraphBuilder::SkipCompilation(size_t number_of_branches) { diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h index 9524fe2534..6c5985a3de 100644 --- a/compiler/optimizing/builder.h +++ b/compiler/optimizing/builder.h @@ -36,13 +36,12 @@ class CodeGenerator; class HGraphBuilder : public ValueObject { public: HGraphBuilder(HGraph* graph, - DexCompilationUnit* dex_compilation_unit, - const DexCompilationUnit* const outer_compilation_unit, + 
const DexCompilationUnit* dex_compilation_unit, + const DexCompilationUnit* outer_compilation_unit, CompilerDriver* driver, CodeGenerator* code_generator, OptimizingCompilerStats* compiler_stats, const uint8_t* interpreter_metadata, - Handle<mirror::DexCache> dex_cache, VariableSizedHandleScope* handles); // Only for unit testing. @@ -89,7 +88,7 @@ class HGraphBuilder : public ValueObject { // The compilation unit of the current method being compiled. Note that // it can be an inlined method. - DexCompilationUnit* const dex_compilation_unit_; + const DexCompilationUnit* const dex_compilation_unit_; CompilerDriver* const compiler_driver_; diff --git a/compiler/optimizing/cha_guard_optimization.cc b/compiler/optimizing/cha_guard_optimization.cc index c806dbfef6..3addaeecd9 100644 --- a/compiler/optimizing/cha_guard_optimization.cc +++ b/compiler/optimizing/cha_guard_optimization.cc @@ -36,7 +36,7 @@ class CHAGuardVisitor : HGraphVisitor { : HGraphVisitor(graph), block_has_cha_guard_(GetGraph()->GetBlocks().size(), 0, - graph->GetArena()->Adapter(kArenaAllocCHA)), + graph->GetAllocator()->Adapter(kArenaAllocCHA)), instruction_iterator_(nullptr) { number_of_guards_to_visit_ = GetGraph()->GetNumberOfCHAGuards(); DCHECK_NE(number_of_guards_to_visit_, 0u); @@ -202,8 +202,8 @@ bool CHAGuardVisitor::HoistGuard(HShouldDeoptimizeFlag* flag, HInstruction* suspend = loop_info->GetSuspendCheck(); // Need a new deoptimize instruction that copies the environment // of the suspend instruction for the loop. 
- HDeoptimize* deoptimize = new (GetGraph()->GetArena()) HDeoptimize( - GetGraph()->GetArena(), compare, DeoptimizationKind::kCHA, suspend->GetDexPc()); + HDeoptimize* deoptimize = new (GetGraph()->GetAllocator()) HDeoptimize( + GetGraph()->GetAllocator(), compare, DeoptimizationKind::kCHA, suspend->GetDexPc()); pre_header->InsertInstructionBefore(deoptimize, pre_header->GetLastInstruction()); deoptimize->CopyEnvironmentFromWithLoopPhiAdjustment( suspend->GetEnvironment(), loop_info->GetHeader()); diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc index 3cb37926af..dd8e3d240f 100644 --- a/compiler/optimizing/code_generator.cc +++ b/compiler/optimizing/code_generator.cc @@ -322,7 +322,7 @@ void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots, void CodeGenerator::CreateCommonInvokeLocationSummary( HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor) { - ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena(); + ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator(); LocationSummary* locations = new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly); @@ -420,7 +420,7 @@ void CodeGenerator::CreateUnresolvedFieldLocationSummary( bool is_get = field_access->IsUnresolvedInstanceFieldGet() || field_access->IsUnresolvedStaticFieldGet(); - ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetArena(); + ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetAllocator(); LocationSummary* locations = new (allocator) LocationSummary(field_access, LocationSummary::kCallOnMainOnly); @@ -541,7 +541,7 @@ void CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(HLoadClass* cls, Location runtime_return_location) { DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall); DCHECK_EQ(cls->InputCount(), 1u); - LocationSummary* locations = new (cls->GetBlock()->GetGraph()->GetArena()) LocationSummary( + LocationSummary* 
locations = new (cls->GetBlock()->GetGraph()->GetAllocator()) LocationSummary( cls, LocationSummary::kCallOnMainOnly); locations->SetInAt(0, Location::NoLocation()); locations->AddTemp(runtime_type_index_location); @@ -617,61 +617,49 @@ std::unique_ptr<CodeGenerator> CodeGenerator::Create(HGraph* graph, const InstructionSetFeatures& isa_features, const CompilerOptions& compiler_options, OptimizingCompilerStats* stats) { - ArenaAllocator* arena = graph->GetArena(); + ArenaAllocator* allocator = graph->GetAllocator(); switch (instruction_set) { #ifdef ART_ENABLE_CODEGEN_arm case kArm: case kThumb2: { return std::unique_ptr<CodeGenerator>( - new (arena) arm::CodeGeneratorARMVIXL(graph, - *isa_features.AsArmInstructionSetFeatures(), - compiler_options, - stats)); + new (allocator) arm::CodeGeneratorARMVIXL( + graph, *isa_features.AsArmInstructionSetFeatures(), compiler_options, stats)); } #endif #ifdef ART_ENABLE_CODEGEN_arm64 case kArm64: { return std::unique_ptr<CodeGenerator>( - new (arena) arm64::CodeGeneratorARM64(graph, - *isa_features.AsArm64InstructionSetFeatures(), - compiler_options, - stats)); + new (allocator) arm64::CodeGeneratorARM64( + graph, *isa_features.AsArm64InstructionSetFeatures(), compiler_options, stats)); } #endif #ifdef ART_ENABLE_CODEGEN_mips case kMips: { return std::unique_ptr<CodeGenerator>( - new (arena) mips::CodeGeneratorMIPS(graph, - *isa_features.AsMipsInstructionSetFeatures(), - compiler_options, - stats)); + new (allocator) mips::CodeGeneratorMIPS( + graph, *isa_features.AsMipsInstructionSetFeatures(), compiler_options, stats)); } #endif #ifdef ART_ENABLE_CODEGEN_mips64 case kMips64: { return std::unique_ptr<CodeGenerator>( - new (arena) mips64::CodeGeneratorMIPS64(graph, - *isa_features.AsMips64InstructionSetFeatures(), - compiler_options, - stats)); + new (allocator) mips64::CodeGeneratorMIPS64( + graph, *isa_features.AsMips64InstructionSetFeatures(), compiler_options, stats)); } #endif #ifdef ART_ENABLE_CODEGEN_x86 case kX86: { 
return std::unique_ptr<CodeGenerator>( - new (arena) x86::CodeGeneratorX86(graph, - *isa_features.AsX86InstructionSetFeatures(), - compiler_options, - stats)); + new (allocator) x86::CodeGeneratorX86( + graph, *isa_features.AsX86InstructionSetFeatures(), compiler_options, stats)); } #endif #ifdef ART_ENABLE_CODEGEN_x86_64 case kX86_64: { return std::unique_ptr<CodeGenerator>( - new (arena) x86_64::CodeGeneratorX86_64(graph, - *isa_features.AsX86_64InstructionSetFeatures(), - compiler_options, - stats)); + new (allocator) x86_64::CodeGeneratorX86_64( + graph, *isa_features.AsX86_64InstructionSetFeatures(), compiler_options, stats)); } #endif default: @@ -712,7 +700,7 @@ static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph, // One can write loops through try/catch, which we do not support for OSR anyway. return; } - ArenaVector<HSuspendCheck*> loop_headers(graph.GetArena()->Adapter(kArenaAllocMisc)); + ArenaVector<HSuspendCheck*> loop_headers(graph.GetAllocator()->Adapter(kArenaAllocMisc)); for (HBasicBlock* block : graph.GetReversePostOrder()) { if (block->IsLoopHeader()) { HSuspendCheck* suspend_check = block->GetLoopInformation()->GetSuspendCheck(); @@ -721,7 +709,8 @@ static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph, } } } - ArenaVector<size_t> covered(loop_headers.size(), 0, graph.GetArena()->Adapter(kArenaAllocMisc)); + ArenaVector<size_t> covered( + loop_headers.size(), 0, graph.GetAllocator()->Adapter(kArenaAllocMisc)); IterationRange<DexInstructionIterator> instructions = code_item.Instructions(); for (auto it = instructions.begin(); it != instructions.end(); ++it) { const uint32_t dex_pc = it.GetDexPC(instructions.begin()); @@ -909,7 +898,7 @@ void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction, } void CodeGenerator::RecordCatchBlockInfo() { - ArenaAllocator* arena = graph_->GetArena(); + ArenaAllocator* allocator = graph_->GetAllocator(); for (HBasicBlock* block : *block_order_) { if (!block->IsCatchBlock()) { 
@@ -924,7 +913,7 @@ void CodeGenerator::RecordCatchBlockInfo() { // The stack mask is not used, so we leave it empty. ArenaBitVector* stack_mask = - ArenaBitVector::Create(arena, 0, /* expandable */ true, kArenaAllocCodeGenerator); + ArenaBitVector::Create(allocator, 0, /* expandable */ true, kArenaAllocCodeGenerator); stack_map_stream_.BeginStackMapEntry(dex_pc, native_pc, @@ -1194,7 +1183,8 @@ LocationSummary* CodeGenerator::CreateThrowingSlowPathLocations(HInstruction* in if (can_throw_into_catch_block) { call_kind = LocationSummary::kCallOnSlowPath; } - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind); if (can_throw_into_catch_block && compiler_options_.GetImplicitNullChecks()) { locations->SetCustomSlowPathCallerSaves(caller_saves); // Default: no caller-save registers. } @@ -1237,7 +1227,7 @@ void CodeGenerator::EmitParallelMoves(Location from1, Location from2, Location to2, DataType::Type type2) { - HParallelMove parallel_move(GetGraph()->GetArena()); + HParallelMove parallel_move(GetGraph()->GetAllocator()); parallel_move.AddMove(from1, to1, type1, nullptr); parallel_move.AddMove(from2, to2, type2, nullptr); GetMoveResolver()->EmitNativeCode(¶llel_move); @@ -1400,7 +1390,7 @@ void CodeGenerator::CreateSystemArrayCopyLocationSummary(HInvoke* invoke) { return; } - ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena(); + ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator(); LocationSummary* locations = new (allocator) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified); diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h index ac3c8394e6..2c3cf262b1 100644 --- a/compiler/optimizing/code_generator.h +++ b/compiler/optimizing/code_generator.h @@ -605,26 +605,26 @@ class CodeGenerator : public 
DeletableArenaObject<kArenaAllocCodeGenerator> { fpu_spill_mask_(0), first_register_slot_in_slow_path_(0), allocated_registers_(RegisterSet::Empty()), - blocked_core_registers_(graph->GetArena()->AllocArray<bool>(number_of_core_registers, - kArenaAllocCodeGenerator)), - blocked_fpu_registers_(graph->GetArena()->AllocArray<bool>(number_of_fpu_registers, - kArenaAllocCodeGenerator)), + blocked_core_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_core_registers, + kArenaAllocCodeGenerator)), + blocked_fpu_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_fpu_registers, + kArenaAllocCodeGenerator)), number_of_core_registers_(number_of_core_registers), number_of_fpu_registers_(number_of_fpu_registers), number_of_register_pairs_(number_of_register_pairs), core_callee_save_mask_(core_callee_save_mask), fpu_callee_save_mask_(fpu_callee_save_mask), - stack_map_stream_(graph->GetArena(), graph->GetInstructionSet()), + stack_map_stream_(graph->GetAllocator(), graph->GetInstructionSet()), block_order_(nullptr), jit_string_roots_(StringReferenceValueComparator(), - graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), + graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), jit_class_roots_(TypeReferenceValueComparator(), - graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), + graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), disasm_info_(nullptr), stats_(stats), graph_(graph), compiler_options_(compiler_options), - slow_paths_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), + slow_paths_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), current_slow_path_(nullptr), current_block_index_(0), is_leaf_(true), @@ -668,8 +668,8 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> { // We use raw array allocations instead of ArenaVector<> because Labels are // non-constructible and non-movable and as such cannot be held in a vector. 
size_t size = GetGraph()->GetBlocks().size(); - LabelType* labels = GetGraph()->GetArena()->AllocArray<LabelType>(size, - kArenaAllocCodeGenerator); + LabelType* labels = + GetGraph()->GetAllocator()->AllocArray<LabelType>(size, kArenaAllocCodeGenerator); for (size_t i = 0; i != size; ++i) { new(labels + i) LabelType(); } @@ -823,7 +823,8 @@ class SlowPathGenerator { SlowPathGenerator(HGraph* graph, CodeGenerator* codegen) : graph_(graph), codegen_(codegen), - slow_path_map_(std::less<uint32_t>(), graph->GetArena()->Adapter(kArenaAllocSlowPaths)) {} + slow_path_map_(std::less<uint32_t>(), + graph->GetAllocator()->Adapter(kArenaAllocSlowPaths)) {} // Creates and adds a new slow-path, if needed, or returns existing one otherwise. // Templating the method (rather than the whole class) on the slow-path type enables @@ -857,10 +858,11 @@ class SlowPathGenerator { } } else { // First time this dex-pc is seen. - iter = slow_path_map_.Put(dex_pc, {{}, {graph_->GetArena()->Adapter(kArenaAllocSlowPaths)}}); + iter = slow_path_map_.Put(dex_pc, + {{}, {graph_->GetAllocator()->Adapter(kArenaAllocSlowPaths)}}); } // Cannot share: create and add new slow-path for this particular dex-pc. 
- SlowPathCodeType* slow_path = new (graph_->GetArena()) SlowPathCodeType(instruction); + SlowPathCodeType* slow_path = new (graph_->GetAllocator()) SlowPathCodeType(instruction); iter->second.emplace_back(std::make_pair(instruction, slow_path)); codegen_->AddSlowPath(slow_path); return slow_path; diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index 468e93a8c0..9be9117967 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -620,7 +620,7 @@ class ArraySetSlowPathARM64 : public SlowPathCodeARM64 { SaveLiveRegisters(codegen, locations); InvokeRuntimeCallingConvention calling_convention; - HParallelMove parallel_move(codegen->GetGraph()->GetArena()); + HParallelMove parallel_move(codegen->GetGraph()->GetAllocator()); parallel_move.AddMove( locations->InAt(0), LocationFrom(calling_convention.GetRegisterAt(0)), @@ -1294,7 +1294,7 @@ class ReadBarrierForHeapReferenceSlowPathARM64 : public SlowPathCodeARM64 { // We're moving two or three locations to locations that could // overlap, so we need a parallel move resolver. 
InvokeRuntimeCallingConvention calling_convention; - HParallelMove parallel_move(codegen->GetGraph()->GetArena()); + HParallelMove parallel_move(codegen->GetGraph()->GetAllocator()); parallel_move.AddMove(ref_, LocationFrom(calling_convention.GetRegisterAt(0)), type, @@ -1453,28 +1453,28 @@ CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph, callee_saved_fp_registers.GetList(), compiler_options, stats), - block_labels_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), + block_labels_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + jump_tables_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), location_builder_(graph, this), instruction_visitor_(graph, this), - move_resolver_(graph->GetArena(), this), - assembler_(graph->GetArena()), + move_resolver_(graph->GetAllocator(), this), + assembler_(graph->GetAllocator()), isa_features_(isa_features), uint32_literals_(std::less<uint32_t>(), - graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), + graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), uint64_literals_(std::less<uint64_t>(), - graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - string_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - baker_read_barrier_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), + graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + pc_relative_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + 
method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + pc_relative_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + pc_relative_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + baker_read_barrier_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), jit_string_patches_(StringReferenceValueComparator(), - graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), + graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), jit_class_patches_(TypeReferenceValueComparator(), - graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) { + graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) { // Save the link register (containing the return address) to mimic Quick. AddAllocatedRegister(LocationFrom(lr)); } @@ -2204,7 +2204,7 @@ void InstructionCodeGeneratorARM64::GenerateSuspendCheck(HSuspendCheck* instruct SuspendCheckSlowPathARM64* slow_path = down_cast<SuspendCheckSlowPathARM64*>(instruction->GetSlowPath()); if (slow_path == nullptr) { - slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, successor); + slow_path = new (GetGraph()->GetAllocator()) SuspendCheckSlowPathARM64(instruction, successor); instruction->SetSlowPath(slow_path); codegen_->AddSlowPath(slow_path); if (successor != nullptr) { @@ -2235,36 +2235,9 @@ InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph, assembler_(codegen->GetAssembler()), codegen_(codegen) {} -#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \ - /* No unimplemented IR. */ - -#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode - -enum UnimplementedInstructionBreakCode { - // Using a base helps identify when we hit such breakpoints. 
- UnimplementedInstructionBreakCodeBaseCode = 0x900, -#define ENUM_UNIMPLEMENTED_INSTRUCTION(name) UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name), - FOR_EACH_UNIMPLEMENTED_INSTRUCTION(ENUM_UNIMPLEMENTED_INSTRUCTION) -#undef ENUM_UNIMPLEMENTED_INSTRUCTION -}; - -#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name) \ - void InstructionCodeGeneratorARM64::Visit##name(H##name* instr ATTRIBUTE_UNUSED) { \ - __ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name)); \ - } \ - void LocationsBuilderARM64::Visit##name(H##name* instr) { \ - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); \ - locations->SetOut(Location::Any()); \ - } - FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS) -#undef DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS - -#undef UNIMPLEMENTED_INSTRUCTION_BREAK_CODE -#undef FOR_EACH_UNIMPLEMENTED_INSTRUCTION - void LocationsBuilderARM64::HandleBinaryOp(HBinaryOperation* instr) { DCHECK_EQ(instr->InputCount(), 2U); - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr); DataType::Type type = instr->GetResultType(); switch (type) { case DataType::Type::kInt32: @@ -2293,10 +2266,10 @@ void LocationsBuilderARM64::HandleFieldGet(HInstruction* instruction, bool object_field_get_with_read_barrier = kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, - object_field_get_with_read_barrier ? - LocationSummary::kCallOnSlowPath : - LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, + object_field_get_with_read_barrier + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall); if (object_field_get_with_read_barrier && kUseBakerReadBarrier) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
// We need a temporary register for the read barrier marking slow @@ -2378,7 +2351,7 @@ void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction, void LocationsBuilderARM64::HandleFieldSet(HInstruction* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); if (IsConstantZeroBitPattern(instruction->InputAt(1))) { locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant())); @@ -2485,7 +2458,7 @@ void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) { void LocationsBuilderARM64::HandleShift(HBinaryOperation* instr) { DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr()); - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr); DataType::Type type = instr->GetResultType(); switch (type) { case DataType::Type::kInt32: @@ -2556,7 +2529,7 @@ void InstructionCodeGeneratorARM64::VisitAnd(HAnd* instruction) { void LocationsBuilderARM64::VisitBitwiseNegatedRight(HBitwiseNegatedRight* instr) { DCHECK(DataType::IsIntegralType(instr->GetType())) << instr->GetType(); - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr); locations->SetInAt(0, Location::RequiresRegister()); // There is no immediate variant of negated bitwise instructions in AArch64. 
locations->SetInAt(1, Location::RequiresRegister()); @@ -2588,7 +2561,7 @@ void LocationsBuilderARM64::VisitDataProcWithShifterOp( DCHECK(instruction->GetType() == DataType::Type::kInt32 || instruction->GetType() == DataType::Type::kInt64); LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); if (instruction->GetInstrKind() == HInstruction::kNeg) { locations->SetInAt(0, Location::ConstantLocation(instruction->InputAt(0)->AsConstant())); } else { @@ -2659,7 +2632,7 @@ void InstructionCodeGeneratorARM64::VisitDataProcWithShifterOp( void LocationsBuilderARM64::VisitIntermediateAddress(HIntermediateAddress* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->GetOffset(), instruction)); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); @@ -2673,7 +2646,7 @@ void InstructionCodeGeneratorARM64::VisitIntermediateAddress(HIntermediateAddres void LocationsBuilderARM64::VisitIntermediateAddressIndex(HIntermediateAddressIndex* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); HIntConstant* shift = instruction->GetShift()->AsIntConstant(); @@ -2705,7 +2678,7 @@ void InstructionCodeGeneratorARM64::VisitIntermediateAddressIndex( void LocationsBuilderARM64::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instr, LocationSummary::kNoCall); + new 
(GetGraph()->GetAllocator()) LocationSummary(instr, LocationSummary::kNoCall); HInstruction* accumulator = instr->InputAt(HMultiplyAccumulate::kInputAccumulatorIndex); if (instr->GetOpKind() == HInstruction::kSub && accumulator->IsConstant() && @@ -2759,10 +2732,10 @@ void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) { bool object_array_get_with_read_barrier = kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, - object_array_get_with_read_barrier ? - LocationSummary::kCallOnSlowPath : - LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, + object_array_get_with_read_barrier + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall); if (object_array_get_with_read_barrier && kUseBakerReadBarrier) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. // We need a temporary register for the read barrier marking slow @@ -2929,7 +2902,7 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) { } void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); } @@ -2953,7 +2926,7 @@ void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) { DataType::Type value_type = instruction->GetComponentType(); bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck(); - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary( + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( instruction, may_need_runtime_call_for_type_check ? 
LocationSummary::kCallOnSlowPath : @@ -3039,7 +3012,7 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) { uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); if (may_need_runtime_call_for_type_check) { - slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathARM64(instruction); + slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathARM64(instruction); codegen_->AddSlowPath(slow_path); if (instruction->GetValueCanBeNull()) { vixl::aarch64::Label non_zero; @@ -3154,7 +3127,7 @@ void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) { void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) { BoundsCheckSlowPathARM64* slow_path = - new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(instruction); + new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARM64(instruction); codegen_->AddSlowPath(slow_path); __ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1)); __ B(slow_path->GetEntryLabel(), hs); @@ -3162,7 +3135,7 @@ void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath); + new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath); locations->SetInAt(0, Location::RequiresRegister()); if (check->HasUses()) { locations->SetOut(Location::SameAsFirstInput()); @@ -3171,7 +3144,7 @@ void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) { void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) { // We assume the class is not null. 
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64( + SlowPathCodeARM64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathARM64( check->GetLoadClass(), check, check->GetDexPc(), true); codegen_->AddSlowPath(slow_path); GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0)); @@ -3210,7 +3183,7 @@ void InstructionCodeGeneratorARM64::GenerateFcmp(HInstruction* instruction) { void LocationsBuilderARM64::VisitCompare(HCompare* compare) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(compare, LocationSummary::kNoCall); DataType::Type in_type = compare->InputAt(0)->GetType(); switch (in_type) { case DataType::Type::kBool: @@ -3276,7 +3249,7 @@ void InstructionCodeGeneratorARM64::VisitCompare(HCompare* compare) { } void LocationsBuilderARM64::HandleCondition(HCondition* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); if (DataType::IsFloatingPointType(instruction->InputAt(0)->GetType())) { locations->SetInAt(0, Location::RequiresFpuRegister()); @@ -3482,7 +3455,7 @@ void InstructionCodeGeneratorARM64::GenerateDivRemIntegral(HBinaryOperation* ins void LocationsBuilderARM64::VisitDiv(HDiv* div) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(div, LocationSummary::kNoCall); switch (div->GetResultType()) { case DataType::Type::kInt32: case DataType::Type::kInt64: @@ -3528,7 +3501,7 @@ void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) { void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) { SlowPathCodeARM64* slow_path = - new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM64(instruction); 
+ new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathARM64(instruction); codegen_->AddSlowPath(slow_path); Location value = instruction->GetLocations()->InAt(0); @@ -3554,7 +3527,7 @@ void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction void LocationsBuilderARM64::VisitDoubleConstant(HDoubleConstant* constant) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } @@ -3572,7 +3545,7 @@ void InstructionCodeGeneratorARM64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) { void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } @@ -3726,7 +3699,7 @@ void InstructionCodeGeneratorARM64::GenerateTestAndBranch(HInstruction* instruct } void LocationsBuilderARM64::VisitIf(HIf* if_instr) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr); if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) { locations->SetInAt(0, Location::RequiresRegister()); } @@ -3747,7 +3720,7 @@ void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) { } void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) { - LocationSummary* locations = new (GetGraph()->GetArena()) + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath); InvokeRuntimeCallingConvention calling_convention; RegisterSet caller_saves = RegisterSet::Empty(); @@ -3768,7 +3741,7 @@ void 
InstructionCodeGeneratorARM64::VisitDeoptimize(HDeoptimize* deoptimize) { } void LocationsBuilderARM64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) { - LocationSummary* locations = new (GetGraph()->GetArena()) + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(flag, LocationSummary::kNoCall); locations->SetOut(Location::RequiresRegister()); } @@ -3790,7 +3763,7 @@ static inline Condition GetConditionForSelect(HCondition* condition) { } void LocationsBuilderARM64::VisitSelect(HSelect* select) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select); if (DataType::IsFloatingPointType(select->GetType())) { locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); @@ -3859,7 +3832,7 @@ void InstructionCodeGeneratorARM64::VisitSelect(HSelect* select) { } void LocationsBuilderARM64::VisitNativeDebugInfo(HNativeDebugInfo* info) { - new (GetGraph()->GetArena()) LocationSummary(info); + new (GetGraph()->GetAllocator()) LocationSummary(info); } void InstructionCodeGeneratorARM64::VisitNativeDebugInfo(HNativeDebugInfo*) { @@ -3928,7 +3901,8 @@ void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) { break; } - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind); if (baker_read_barrier_slow_path) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} @@ -4083,8 +4057,8 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) { kWithoutReadBarrier); __ Cmp(out, cls); DCHECK(locations->OnlyCallsOnSlowPath()); - slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction, - /* is_fatal */ false); + slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARM64(instruction, + /* is_fatal */ false); codegen_->AddSlowPath(slow_path); __ B(ne, slow_path->GetEntryLabel()); __ Mov(out, 1); @@ -4115,8 +4089,8 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) { // call to the runtime not using a type checking slow path). // This should also be beneficial for the other cases above. DCHECK(locations->OnlyCallsOnSlowPath()); - slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction, - /* is_fatal */ false); + slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARM64(instruction, + /* is_fatal */ false); codegen_->AddSlowPath(slow_path); __ B(slow_path->GetEntryLabel()); if (zero.IsLinked()) { @@ -4161,7 +4135,8 @@ void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) { break; } - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); // Add temps for read barriers and other uses. One is used by TypeCheckSlowPathARM64. 
@@ -4203,8 +4178,8 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) { !instruction->CanThrowIntoCatchBlock(); } SlowPathCodeARM64* type_check_slow_path = - new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction, - is_type_check_slow_path_fatal); + new (GetGraph()->GetAllocator()) TypeCheckSlowPathARM64(instruction, + is_type_check_slow_path_fatal); codegen_->AddSlowPath(type_check_slow_path); vixl::aarch64::Label done; @@ -4372,7 +4347,7 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) { } void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant); locations->SetOut(Location::ConstantLocation(constant)); } @@ -4381,7 +4356,7 @@ void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant ATTR } void LocationsBuilderARM64::VisitNullConstant(HNullConstant* constant) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant); locations->SetOut(Location::ConstantLocation(constant)); } @@ -4472,7 +4447,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok } void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) { - IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena(), codegen_); + IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetAllocator(), codegen_); if (intrinsic.TryDispatch(invoke)) { return; } @@ -4485,7 +4460,7 @@ void LocationsBuilderARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* inv // art::PrepareForRegisterAllocation. 
DCHECK(!invoke->IsStaticWithExplicitClinitCheck()); - IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena(), codegen_); + IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetAllocator(), codegen_); if (intrinsic.TryDispatch(invoke)) { return; } @@ -4896,7 +4871,7 @@ void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) { LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier) ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind); if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } @@ -5037,7 +5012,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA bool do_clinit = cls->MustGenerateClinitCheck(); if (generate_null_check || do_clinit) { DCHECK(cls->CanCallRuntime()); - SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64( + SlowPathCodeARM64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathARM64( cls, cls, cls->GetDexPc(), do_clinit, bss_entry_temp, bss_entry_adrp_label); codegen_->AddSlowPath(slow_path); if (generate_null_check) { @@ -5058,7 +5033,7 @@ static MemOperand GetExceptionTlsAddress() { void LocationsBuilderARM64::VisitLoadException(HLoadException* load) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall); locations->SetOut(Location::RequiresRegister()); } @@ -5067,7 +5042,7 @@ void InstructionCodeGeneratorARM64::VisitLoadException(HLoadException* instructi } void LocationsBuilderARM64::VisitClearException(HClearException* clear) { - new (GetGraph()->GetArena()) 
LocationSummary(clear, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall); } void InstructionCodeGeneratorARM64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) { @@ -5094,7 +5069,7 @@ HLoadString::LoadKind CodeGeneratorARM64::GetSupportedLoadStringKind( void LocationsBuilderARM64::VisitLoadString(HLoadString* load) { LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load); - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind); if (load->GetLoadKind() == HLoadString::LoadKind::kRuntimeCall) { InvokeRuntimeCallingConvention calling_convention; locations->SetOut(calling_convention.GetReturnLocation(load->GetType())); @@ -5177,7 +5152,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD ldr_label, kCompilerReadBarrierOption); SlowPathCodeARM64* slow_path = - new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load, temp, adrp_label); + new (GetGraph()->GetAllocator()) LoadStringSlowPathARM64(load, temp, adrp_label); codegen_->AddSlowPath(slow_path); __ Cbz(out.X(), slow_path->GetEntryLabel()); __ Bind(slow_path->GetExitLabel()); @@ -5210,7 +5185,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD } void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant); locations->SetOut(Location::ConstantLocation(constant)); } @@ -5219,8 +5194,8 @@ void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant AT } void LocationsBuilderARM64::VisitMonitorOperation(HMonitorOperation* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) 
LocationSummary(instruction, LocationSummary::kCallOnMainOnly); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); } @@ -5239,7 +5214,7 @@ void InstructionCodeGeneratorARM64::VisitMonitorOperation(HMonitorOperation* ins void LocationsBuilderARM64::VisitMul(HMul* mul) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall); switch (mul->GetResultType()) { case DataType::Type::kInt32: case DataType::Type::kInt64: @@ -5279,7 +5254,7 @@ void InstructionCodeGeneratorARM64::VisitMul(HMul* mul) { void LocationsBuilderARM64::VisitNeg(HNeg* neg) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall); switch (neg->GetResultType()) { case DataType::Type::kInt32: case DataType::Type::kInt64: @@ -5316,8 +5291,8 @@ void InstructionCodeGeneratorARM64::VisitNeg(HNeg* neg) { } void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConvention calling_convention; locations->SetOut(LocationFrom(x0)); locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); @@ -5335,8 +5310,8 @@ void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) { } void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) 
LocationSummary(instruction, LocationSummary::kCallOnMainOnly); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConvention calling_convention; if (instruction->IsStringAlloc()) { locations->AddTemp(LocationFrom(kArtMethodRegister)); @@ -5372,7 +5347,7 @@ void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) } void LocationsBuilderARM64::VisitNot(HNot* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); } @@ -5390,7 +5365,7 @@ void InstructionCodeGeneratorARM64::VisitNot(HNot* instruction) { } void LocationsBuilderARM64::VisitBooleanNot(HBooleanNot* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); } @@ -5418,7 +5393,8 @@ void CodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) { } void CodeGeneratorARM64::GenerateExplicitNullCheck(HNullCheck* instruction) { - SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM64(instruction); + SlowPathCodeARM64* slow_path = + new (GetGraph()->GetAllocator()) NullCheckSlowPathARM64(instruction); AddSlowPath(slow_path); LocationSummary* locations = instruction->GetLocations(); @@ -5448,7 +5424,7 @@ void InstructionCodeGeneratorARM64::VisitParallelMove(HParallelMove* instruction } void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) { - LocationSummary* locations = new 
(GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); Location location = parameter_visitor_.GetNextLocation(instruction->GetType()); if (location.IsStackSlot()) { location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize()); @@ -5465,7 +5441,7 @@ void InstructionCodeGeneratorARM64::VisitParameterValue( void LocationsBuilderARM64::VisitCurrentMethod(HCurrentMethod* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); locations->SetOut(LocationFrom(kArtMethodRegister)); } @@ -5475,7 +5451,7 @@ void InstructionCodeGeneratorARM64::VisitCurrentMethod( } void LocationsBuilderARM64::VisitPhi(HPhi* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) { locations->SetInAt(i, Location::Any()); } @@ -5491,7 +5467,7 @@ void LocationsBuilderARM64::VisitRem(HRem* rem) { LocationSummary::CallKind call_kind = DataType::IsFloatingPointType(type) ? 
LocationSummary::kCallOnMainOnly : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(rem, call_kind); switch (type) { case DataType::Type::kInt32: @@ -5563,7 +5539,7 @@ void InstructionCodeGeneratorARM64::VisitMemoryBarrier(HMemoryBarrier* memory_ba } void LocationsBuilderARM64::VisitReturn(HReturn* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); DataType::Type return_type = instruction->InputAt(0)->GetType(); locations->SetInAt(0, ARM64ReturnLocation(return_type)); } @@ -5697,8 +5673,8 @@ void InstructionCodeGeneratorARM64::VisitUnresolvedStaticFieldSet( } void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnSlowPath); // In suspend check slow path, usually there are no caller-save registers at all. // If SIMD instructions are present, however, we force spilling all live SIMD // registers in full width (since the runtime only saves/restores lower part). 
@@ -5722,8 +5698,8 @@ void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction } void LocationsBuilderARM64::VisitThrow(HThrow* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); } @@ -5735,7 +5711,7 @@ void InstructionCodeGeneratorARM64::VisitThrow(HThrow* instruction) { void LocationsBuilderARM64::VisitTypeConversion(HTypeConversion* conversion) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(conversion, LocationSummary::kNoCall); DataType::Type input_type = conversion->GetInputType(); DataType::Type result_type = conversion->GetResultType(); DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type)) @@ -5829,7 +5805,7 @@ void InstructionCodeGeneratorARM64::VisitBoundType(HBoundType* instruction ATTRI // Simple implementation of packed switch - generate cascaded compare/jumps. void LocationsBuilderARM64::VisitPackedSwitch(HPackedSwitch* switch_instr) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); } @@ -6053,7 +6029,7 @@ void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad( // Slow path marking the GC root `root`. The entrypoint will // be loaded by the slow path code. 
SlowPathCodeARM64* slow_path = - new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM64(instruction, root); + new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathARM64(instruction, root); codegen_->AddSlowPath(slow_path); // /* GcRoot<mirror::Object> */ root = *(obj + offset) @@ -6312,7 +6288,7 @@ void CodeGeneratorARM64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* // Slow path marking the object `ref` when the GC is marking. The // entrypoint will be loaded by the slow path code. SlowPathCodeARM64* slow_path = - new (GetGraph()->GetArena()) LoadReferenceWithBakerReadBarrierSlowPathARM64( + new (GetGraph()->GetAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARM64( instruction, ref, obj, @@ -6370,7 +6346,7 @@ void CodeGeneratorARM64::UpdateReferenceFieldWithBakerReadBarrier(HInstruction* // Slow path updating the object reference at address `obj + field_offset` // when the GC is marking. The entrypoint will be loaded by the slow path code. SlowPathCodeARM64* slow_path = - new (GetGraph()->GetArena()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64( + new (GetGraph()->GetAllocator()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64( instruction, ref, obj, @@ -6497,7 +6473,7 @@ void CodeGeneratorARM64::GenerateReadBarrierSlow(HInstruction* instruction, // not used by the artReadBarrierSlow entry point. // // TODO: Unpoison `ref` when it is used by artReadBarrierSlow. - SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) + SlowPathCodeARM64* slow_path = new (GetGraph()->GetAllocator()) ReadBarrierForHeapReferenceSlowPathARM64(instruction, out, ref, obj, offset, index); AddSlowPath(slow_path); @@ -6533,7 +6509,7 @@ void CodeGeneratorARM64::GenerateReadBarrierForRootSlow(HInstruction* instructio // Note that GC roots are not affected by heap poisoning, so we do // not need to do anything special for this here. 
SlowPathCodeARM64* slow_path = - new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathARM64(instruction, out, root); + new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathARM64(instruction, out, root); AddSlowPath(slow_path); __ B(slow_path->GetEntryLabel()); @@ -6542,7 +6518,7 @@ void CodeGeneratorARM64::GenerateReadBarrierForRootSlow(HInstruction* instructio void LocationsBuilderARM64::VisitClassTableGet(HClassTableGet* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister()); } diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h index 21da9557e5..e53773c73d 100644 --- a/compiler/optimizing/code_generator_arm64.h +++ b/compiler/optimizing/code_generator_arm64.h @@ -489,7 +489,7 @@ class CodeGeneratorARM64 : public CodeGenerator { uint32_t GetPreferredSlotsAlignment() const OVERRIDE { return vixl::aarch64::kXRegSizeInBytes; } JumpTableARM64* CreateJumpTable(HPackedSwitch* switch_instr) { - jump_tables_.emplace_back(new (GetGraph()->GetArena()) JumpTableARM64(switch_instr)); + jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARM64(switch_instr)); return jump_tables_.back().get(); } diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc index d4fb064107..d7137a3b28 100644 --- a/compiler/optimizing/code_generator_arm_vixl.cc +++ b/compiler/optimizing/code_generator_arm_vixl.cc @@ -763,7 +763,7 @@ class ArraySetSlowPathARMVIXL : public SlowPathCodeARMVIXL { SaveLiveRegisters(codegen, locations); InvokeRuntimeCallingConventionARMVIXL calling_convention; - HParallelMove parallel_move(codegen->GetGraph()->GetArena()); + HParallelMove 
parallel_move(codegen->GetGraph()->GetAllocator()); parallel_move.AddMove( locations->InAt(0), LocationFrom(calling_convention.GetRegisterAt(0)), @@ -1414,7 +1414,7 @@ class ReadBarrierForHeapReferenceSlowPathARMVIXL : public SlowPathCodeARMVIXL { // We're moving two or three locations to locations that could // overlap, so we need a parallel move resolver. InvokeRuntimeCallingConventionARMVIXL calling_convention; - HParallelMove parallel_move(codegen->GetGraph()->GetArena()); + HParallelMove parallel_move(codegen->GetGraph()->GetAllocator()); parallel_move.AddMove(ref_, LocationFrom(calling_convention.GetRegisterAt(0)), DataType::Type::kReference, @@ -2421,26 +2421,26 @@ CodeGeneratorARMVIXL::CodeGeneratorARMVIXL(HGraph* graph, ComputeSRegisterListMask(kFpuCalleeSaves), compiler_options, stats), - block_labels_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), + block_labels_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + jump_tables_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), location_builder_(graph, this), instruction_visitor_(graph, this), - move_resolver_(graph->GetArena(), this), - assembler_(graph->GetArena()), + move_resolver_(graph->GetAllocator(), this), + assembler_(graph->GetAllocator()), isa_features_(isa_features), uint32_literals_(std::less<uint32_t>(), - graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - string_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - 
baker_read_barrier_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), + graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + pc_relative_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + pc_relative_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + pc_relative_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + baker_read_barrier_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), jit_string_patches_(StringReferenceValueComparator(), - graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), + graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), jit_class_patches_(TypeReferenceValueComparator(), - graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) { + graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) { // Always save the LR register to mimic Quick. AddAllocatedRegister(Location::RegisterLocation(LR)); // Give D30 and D31 as scratch register to VIXL. The register allocator only works on @@ -2810,7 +2810,7 @@ void CodeGeneratorARMVIXL::MoveConstant(Location location, int32_t value) { void CodeGeneratorARMVIXL::MoveLocation(Location dst, Location src, DataType::Type dst_type) { // TODO(VIXL): Maybe refactor to have the 'move' implementation here and use it in // `ParallelMoveResolverARMVIXL::EmitMove`, as is done in the `arm64` backend. 
- HParallelMove move(GetGraph()->GetArena()); + HParallelMove move(GetGraph()->GetAllocator()); move.AddMove(src, dst, dst_type, nullptr); GetMoveResolver()->EmitNativeCode(&move); } @@ -3030,7 +3030,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateTestAndBranch(HInstruction* instru } void LocationsBuilderARMVIXL::VisitIf(HIf* if_instr) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr); if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) { locations->SetInAt(0, Location::RequiresRegister()); } @@ -3047,7 +3047,7 @@ void InstructionCodeGeneratorARMVIXL::VisitIf(HIf* if_instr) { } void LocationsBuilderARMVIXL::VisitDeoptimize(HDeoptimize* deoptimize) { - LocationSummary* locations = new (GetGraph()->GetArena()) + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath); InvokeRuntimeCallingConventionARMVIXL calling_convention; RegisterSet caller_saves = RegisterSet::Empty(); @@ -3068,7 +3068,7 @@ void InstructionCodeGeneratorARMVIXL::VisitDeoptimize(HDeoptimize* deoptimize) { } void LocationsBuilderARMVIXL::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) { - LocationSummary* locations = new (GetGraph()->GetArena()) + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(flag, LocationSummary::kNoCall); locations->SetOut(Location::RequiresRegister()); } @@ -3081,7 +3081,7 @@ void InstructionCodeGeneratorARMVIXL::VisitShouldDeoptimizeFlag(HShouldDeoptimiz } void LocationsBuilderARMVIXL::VisitSelect(HSelect* select) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select); const bool is_floating_point = DataType::IsFloatingPointType(select->GetType()); if (is_floating_point) { @@ -3222,7 +3222,7 @@ void 
InstructionCodeGeneratorARMVIXL::VisitSelect(HSelect* select) { } void LocationsBuilderARMVIXL::VisitNativeDebugInfo(HNativeDebugInfo* info) { - new (GetGraph()->GetArena()) LocationSummary(info); + new (GetGraph()->GetAllocator()) LocationSummary(info); } void InstructionCodeGeneratorARMVIXL::VisitNativeDebugInfo(HNativeDebugInfo*) { @@ -3312,7 +3312,7 @@ void CodeGeneratorARMVIXL::GenerateConditionWithZero(IfCondition condition, void LocationsBuilderARMVIXL::HandleCondition(HCondition* cond) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(cond, LocationSummary::kNoCall); // Handle the long/FP comparisons made in instruction simplification. switch (cond->InputAt(0)->GetType()) { case DataType::Type::kInt64: @@ -3471,7 +3471,7 @@ void InstructionCodeGeneratorARMVIXL::VisitAboveOrEqual(HAboveOrEqual* comp) { void LocationsBuilderARMVIXL::VisitIntConstant(HIntConstant* constant) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } @@ -3481,7 +3481,7 @@ void InstructionCodeGeneratorARMVIXL::VisitIntConstant(HIntConstant* constant AT void LocationsBuilderARMVIXL::VisitNullConstant(HNullConstant* constant) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } @@ -3491,7 +3491,7 @@ void InstructionCodeGeneratorARMVIXL::VisitNullConstant(HNullConstant* constant void LocationsBuilderARMVIXL::VisitLongConstant(HLongConstant* constant) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall); + new 
(GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } @@ -3501,7 +3501,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLongConstant(HLongConstant* constant void LocationsBuilderARMVIXL::VisitFloatConstant(HFloatConstant* constant) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } @@ -3512,7 +3512,7 @@ void InstructionCodeGeneratorARMVIXL::VisitFloatConstant( void LocationsBuilderARMVIXL::VisitDoubleConstant(HDoubleConstant* constant) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } @@ -3548,7 +3548,7 @@ void InstructionCodeGeneratorARMVIXL::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE void LocationsBuilderARMVIXL::VisitReturn(HReturn* ret) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(ret, LocationSummary::kNoCall); locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType())); } @@ -3722,7 +3722,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic* void LocationsBuilderARMVIXL::VisitNeg(HNeg* neg) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall); switch (neg->GetResultType()) { case DataType::Type::kInt32: { locations->SetInAt(0, Location::RequiresRegister()); @@ -3794,7 +3794,7 @@ void LocationsBuilderARMVIXL::VisitTypeConversion(HTypeConversion* 
conversion) { ? LocationSummary::kCallOnMainOnly : LocationSummary::kNoCall; LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind); + new (GetGraph()->GetAllocator()) LocationSummary(conversion, call_kind); switch (result_type) { case DataType::Type::kUint8: @@ -4158,7 +4158,7 @@ void InstructionCodeGeneratorARMVIXL::VisitTypeConversion(HTypeConversion* conve void LocationsBuilderARMVIXL::VisitAdd(HAdd* add) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(add, LocationSummary::kNoCall); switch (add->GetResultType()) { case DataType::Type::kInt32: { locations->SetInAt(0, Location::RequiresRegister()); @@ -4223,7 +4223,7 @@ void InstructionCodeGeneratorARMVIXL::VisitAdd(HAdd* add) { void LocationsBuilderARMVIXL::VisitSub(HSub* sub) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(sub, LocationSummary::kNoCall); switch (sub->GetResultType()) { case DataType::Type::kInt32: { locations->SetInAt(0, Location::RequiresRegister()); @@ -4285,7 +4285,7 @@ void InstructionCodeGeneratorARMVIXL::VisitSub(HSub* sub) { void LocationsBuilderARMVIXL::VisitMul(HMul* mul) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall); switch (mul->GetResultType()) { case DataType::Type::kInt32: case DataType::Type::kInt64: { @@ -4494,7 +4494,7 @@ void LocationsBuilderARMVIXL::VisitDiv(HDiv* div) { call_kind = LocationSummary::kCallOnMainOnly; } - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(div, call_kind); switch (div->GetResultType()) { case DataType::Type::kInt32: { 
@@ -4607,7 +4607,7 @@ void LocationsBuilderARMVIXL::VisitRem(HRem* rem) { call_kind = LocationSummary::kNoCall; } - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(rem, call_kind); switch (type) { case DataType::Type::kInt32: { @@ -4734,7 +4734,7 @@ void LocationsBuilderARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) { void InstructionCodeGeneratorARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) { DivZeroCheckSlowPathARMVIXL* slow_path = - new (GetGraph()->GetArena()) DivZeroCheckSlowPathARMVIXL(instruction); + new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathARMVIXL(instruction); codegen_->AddSlowPath(slow_path); LocationSummary* locations = instruction->GetLocations(); @@ -4872,7 +4872,7 @@ void InstructionCodeGeneratorARMVIXL::HandleLongRotate(HRor* ror) { void LocationsBuilderARMVIXL::VisitRor(HRor* ror) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(ror, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(ror, LocationSummary::kNoCall); switch (ror->GetResultType()) { case DataType::Type::kInt32: { locations->SetInAt(0, Location::RequiresRegister()); @@ -4918,7 +4918,7 @@ void LocationsBuilderARMVIXL::HandleShift(HBinaryOperation* op) { DCHECK(op->IsShl() || op->IsShr() || op->IsUShr()); LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(op, LocationSummary::kNoCall); switch (op->GetResultType()) { case DataType::Type::kInt32: { @@ -5148,8 +5148,8 @@ void InstructionCodeGeneratorARMVIXL::VisitUShr(HUShr* ushr) { } void LocationsBuilderARMVIXL::VisitNewInstance(HNewInstance* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly); + LocationSummary* locations = new 
(GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); if (instruction->IsStringAlloc()) { locations->AddTemp(LocationFrom(kMethodRegister)); } else { @@ -5182,8 +5182,8 @@ void InstructionCodeGeneratorARMVIXL::VisitNewInstance(HNewInstance* instruction } void LocationsBuilderARMVIXL::VisitNewArray(HNewArray* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConventionARMVIXL calling_convention; locations->SetOut(LocationFrom(r0)); locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); @@ -5203,7 +5203,7 @@ void InstructionCodeGeneratorARMVIXL::VisitNewArray(HNewArray* instruction) { void LocationsBuilderARMVIXL::VisitParameterValue(HParameterValue* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); Location location = parameter_visitor_.GetNextLocation(instruction->GetType()); if (location.IsStackSlot()) { location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize()); @@ -5220,7 +5220,7 @@ void InstructionCodeGeneratorARMVIXL::VisitParameterValue( void LocationsBuilderARMVIXL::VisitCurrentMethod(HCurrentMethod* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); locations->SetOut(LocationFrom(kMethodRegister)); } @@ -5231,7 +5231,7 @@ void InstructionCodeGeneratorARMVIXL::VisitCurrentMethod( void LocationsBuilderARMVIXL::VisitNot(HNot* not_) { LocationSummary* locations = - new (GetGraph()->GetArena()) 
LocationSummary(not_, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(not_, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); } @@ -5257,7 +5257,7 @@ void InstructionCodeGeneratorARMVIXL::VisitNot(HNot* not_) { void LocationsBuilderARMVIXL::VisitBooleanNot(HBooleanNot* bool_not) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(bool_not, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); } @@ -5268,7 +5268,7 @@ void InstructionCodeGeneratorARMVIXL::VisitBooleanNot(HBooleanNot* bool_not) { void LocationsBuilderARMVIXL::VisitCompare(HCompare* compare) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(compare, LocationSummary::kNoCall); switch (compare->InputAt(0)->GetType()) { case DataType::Type::kBool: case DataType::Type::kUint8: @@ -5359,7 +5359,7 @@ void InstructionCodeGeneratorARMVIXL::VisitCompare(HCompare* compare) { void LocationsBuilderARMVIXL::VisitPhi(HPhi* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) { locations->SetInAt(i, Location::Any()); } @@ -5437,7 +5437,7 @@ void LocationsBuilderARMVIXL::HandleFieldSet( DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet()); LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) 
LocationSummary(instruction, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); DataType::Type field_type = field_info.GetFieldType(); @@ -5600,10 +5600,10 @@ void LocationsBuilderARMVIXL::HandleFieldGet(HInstruction* instruction, bool object_field_get_with_read_barrier = kEmitCompilerReadBarrier && (field_info.GetFieldType() == DataType::Type::kReference); LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, - object_field_get_with_read_barrier ? - LocationSummary::kCallOnSlowPath : - LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, + object_field_get_with_read_barrier + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall); if (object_field_get_with_read_barrier && kUseBakerReadBarrier) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } @@ -5960,7 +5960,7 @@ void CodeGeneratorARMVIXL::GenerateImplicitNullCheck(HNullCheck* instruction) { void CodeGeneratorARMVIXL::GenerateExplicitNullCheck(HNullCheck* instruction) { NullCheckSlowPathARMVIXL* slow_path = - new (GetGraph()->GetArena()) NullCheckSlowPathARMVIXL(instruction); + new (GetGraph()->GetAllocator()) NullCheckSlowPathARMVIXL(instruction); AddSlowPath(slow_path); __ CompareAndBranchIfZero(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel()); } @@ -6041,10 +6041,10 @@ void LocationsBuilderARMVIXL::VisitArrayGet(HArrayGet* instruction) { bool object_array_get_with_read_barrier = kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, - object_array_get_with_read_barrier ? - LocationSummary::kCallOnSlowPath : - LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, + object_array_get_with_read_barrier + ? 
LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall); if (object_array_get_with_read_barrier && kUseBakerReadBarrier) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } @@ -6325,7 +6325,7 @@ void LocationsBuilderARMVIXL::VisitArraySet(HArraySet* instruction) { CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue()); bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck(); - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary( + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( instruction, may_need_runtime_call_for_type_check ? LocationSummary::kCallOnSlowPath : @@ -6433,7 +6433,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) { SlowPathCodeARMVIXL* slow_path = nullptr; if (may_need_runtime_call_for_type_check) { - slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathARMVIXL(instruction); + slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathARMVIXL(instruction); codegen_->AddSlowPath(slow_path); if (instruction->GetValueCanBeNull()) { vixl32::Label non_zero; @@ -6607,7 +6607,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) { void LocationsBuilderARMVIXL::VisitArrayLength(HArrayLength* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); } @@ -6631,7 +6631,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayLength(HArrayLength* instruction void LocationsBuilderARMVIXL::VisitIntermediateAddress(HIntermediateAddress* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new 
(GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->GetOffset())); @@ -6694,7 +6694,7 @@ void InstructionCodeGeneratorARMVIXL::VisitBoundsCheck(HBoundsCheck* instruction int32_t index = Int32ConstantFrom(index_loc); if (index < 0 || index >= length) { SlowPathCodeARMVIXL* slow_path = - new (GetGraph()->GetArena()) BoundsCheckSlowPathARMVIXL(instruction); + new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARMVIXL(instruction); codegen_->AddSlowPath(slow_path); __ B(slow_path->GetEntryLabel()); } else { @@ -6705,13 +6705,13 @@ void InstructionCodeGeneratorARMVIXL::VisitBoundsCheck(HBoundsCheck* instruction } SlowPathCodeARMVIXL* slow_path = - new (GetGraph()->GetArena()) BoundsCheckSlowPathARMVIXL(instruction); + new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARMVIXL(instruction); __ Cmp(RegisterFrom(index_loc), length); codegen_->AddSlowPath(slow_path); __ B(hs, slow_path->GetEntryLabel()); } else { SlowPathCodeARMVIXL* slow_path = - new (GetGraph()->GetArena()) BoundsCheckSlowPathARMVIXL(instruction); + new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARMVIXL(instruction); __ Cmp(RegisterFrom(length_loc), InputOperandAt(instruction, 0)); codegen_->AddSlowPath(slow_path); __ B(ls, slow_path->GetEntryLabel()); @@ -6745,8 +6745,8 @@ void InstructionCodeGeneratorARMVIXL::VisitParallelMove(HParallelMove* instructi } void LocationsBuilderARMVIXL::VisitSuspendCheck(HSuspendCheck* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnSlowPath); locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} @@ -6770,7 +6770,8 @@ void InstructionCodeGeneratorARMVIXL::GenerateSuspendCheck(HSuspendCheck* instru SuspendCheckSlowPathARMVIXL* slow_path = down_cast<SuspendCheckSlowPathARMVIXL*>(instruction->GetSlowPath()); if (slow_path == nullptr) { - slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARMVIXL(instruction, successor); + slow_path = + new (GetGraph()->GetAllocator()) SuspendCheckSlowPathARMVIXL(instruction, successor); instruction->SetSlowPath(slow_path); codegen_->AddSlowPath(slow_path); if (successor != nullptr) { @@ -7085,7 +7086,7 @@ void LocationsBuilderARMVIXL::VisitLoadClass(HLoadClass* cls) { LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier) ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind); if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} @@ -7208,7 +7209,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_ if (generate_null_check || cls->MustGenerateClinitCheck()) { DCHECK(cls->CanCallRuntime()); - LoadClassSlowPathARMVIXL* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARMVIXL( + LoadClassSlowPathARMVIXL* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathARMVIXL( cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck()); codegen_->AddSlowPath(slow_path); if (generate_null_check) { @@ -7225,7 +7226,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_ void LocationsBuilderARMVIXL::VisitClinitCheck(HClinitCheck* check) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath); + new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath); locations->SetInAt(0, Location::RequiresRegister()); if (check->HasUses()) { locations->SetOut(Location::SameAsFirstInput()); @@ -7235,10 +7236,10 @@ void LocationsBuilderARMVIXL::VisitClinitCheck(HClinitCheck* check) { void InstructionCodeGeneratorARMVIXL::VisitClinitCheck(HClinitCheck* check) { // We assume the class is not null. 
LoadClassSlowPathARMVIXL* slow_path = - new (GetGraph()->GetArena()) LoadClassSlowPathARMVIXL(check->GetLoadClass(), - check, - check->GetDexPc(), - /* do_clinit */ true); + new (GetGraph()->GetAllocator()) LoadClassSlowPathARMVIXL(check->GetLoadClass(), + check, + check->GetDexPc(), + /* do_clinit */ true); codegen_->AddSlowPath(slow_path); GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0)); } @@ -7279,7 +7280,7 @@ HLoadString::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadStringKind( void LocationsBuilderARMVIXL::VisitLoadString(HLoadString* load) { LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load); - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind); HLoadString::LoadKind load_kind = load->GetLoadKind(); if (load_kind == HLoadString::LoadKind::kRuntimeCall) { locations->SetOut(LocationFrom(r0)); @@ -7348,7 +7349,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE codegen_->EmitMovwMovtPlaceholder(labels, temp); GenerateGcRootFieldLoad(load, out_loc, temp, /* offset */ 0, kCompilerReadBarrierOption); LoadStringSlowPathARMVIXL* slow_path = - new (GetGraph()->GetArena()) LoadStringSlowPathARMVIXL(load); + new (GetGraph()->GetAllocator()) LoadStringSlowPathARMVIXL(load); codegen_->AddSlowPath(slow_path); __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel()); __ Bind(slow_path->GetExitLabel()); @@ -7382,7 +7383,7 @@ static int32_t GetExceptionTlsOffset() { void LocationsBuilderARMVIXL::VisitLoadException(HLoadException* load) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall); locations->SetOut(Location::RequiresRegister()); } @@ -7393,7 +7394,7 @@ void 
InstructionCodeGeneratorARMVIXL::VisitLoadException(HLoadException* load) { void LocationsBuilderARMVIXL::VisitClearException(HClearException* clear) { - new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall); } void InstructionCodeGeneratorARMVIXL::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) { @@ -7404,8 +7405,8 @@ void InstructionCodeGeneratorARMVIXL::VisitClearException(HClearException* clear } void LocationsBuilderARMVIXL::VisitThrow(HThrow* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConventionARMVIXL calling_convention; locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); } @@ -7457,7 +7458,8 @@ void LocationsBuilderARMVIXL::VisitInstanceOf(HInstanceOf* instruction) { break; } - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind); if (baker_read_barrier_slow_path) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} @@ -7674,8 +7676,8 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction) kWithoutReadBarrier); __ Cmp(out, cls); DCHECK(locations->OnlyCallsOnSlowPath()); - slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARMVIXL(instruction, - /* is_fatal */ false); + slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARMVIXL(instruction, + /* is_fatal */ false); codegen_->AddSlowPath(slow_path); __ B(ne, slow_path->GetEntryLabel()); __ Mov(out, 1); @@ -7703,8 +7705,8 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction) // call to the runtime not using a type checking slow path). // This should also be beneficial for the other cases above. DCHECK(locations->OnlyCallsOnSlowPath()); - slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARMVIXL(instruction, - /* is_fatal */ false); + slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARMVIXL(instruction, + /* is_fatal */ false); codegen_->AddSlowPath(slow_path); __ B(slow_path->GetEntryLabel()); break; @@ -7741,7 +7743,8 @@ void LocationsBuilderARMVIXL::VisitCheckCast(HCheckCast* instruction) { break; } - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind)); @@ -7781,8 +7784,8 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) { !instruction->CanThrowIntoCatchBlock(); } SlowPathCodeARMVIXL* type_check_slow_path = - new (GetGraph()->GetArena()) TypeCheckSlowPathARMVIXL(instruction, - is_type_check_slow_path_fatal); + new (GetGraph()->GetAllocator()) TypeCheckSlowPathARMVIXL(instruction, + is_type_check_slow_path_fatal); codegen_->AddSlowPath(type_check_slow_path); vixl32::Label done; @@ 
-7957,8 +7960,8 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) { } void LocationsBuilderARMVIXL::VisitMonitorOperation(HMonitorOperation* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConventionARMVIXL calling_convention; locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); } @@ -7989,7 +7992,7 @@ void LocationsBuilderARMVIXL::VisitXor(HXor* instruction) { void LocationsBuilderARMVIXL::HandleBitwiseOperation(HBinaryOperation* instruction, Opcode opcode) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); DCHECK(instruction->GetResultType() == DataType::Type::kInt32 || instruction->GetResultType() == DataType::Type::kInt64); // Note: GVN reorders commutative operations to have the constant on the right hand side. 
@@ -8012,7 +8015,7 @@ void InstructionCodeGeneratorARMVIXL::VisitXor(HXor* instruction) { void LocationsBuilderARMVIXL::VisitBitwiseNegatedRight(HBitwiseNegatedRight* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); DCHECK(instruction->GetResultType() == DataType::Type::kInt32 || instruction->GetResultType() == DataType::Type::kInt64); @@ -8079,7 +8082,7 @@ void LocationsBuilderARMVIXL::VisitDataProcWithShifterOp( DCHECK(instruction->GetType() == DataType::Type::kInt32 || instruction->GetType() == DataType::Type::kInt64); LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); const bool overlap = instruction->GetType() == DataType::Type::kInt64 && HDataProcWithShifterOp::IsExtensionOp(instruction->GetOpKind()); @@ -8443,7 +8446,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateGcRootFieldLoad( // Slow path marking the GC root `root`. The entrypoint will // be loaded by the slow path code. SlowPathCodeARMVIXL* slow_path = - new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARMVIXL(instruction, root); + new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathARMVIXL(instruction, root); codegen_->AddSlowPath(slow_path); // /* GcRoot<mirror::Object> */ root = *(obj + offset) @@ -8692,7 +8695,7 @@ void CodeGeneratorARMVIXL::GenerateReferenceLoadWithBakerReadBarrier(HInstructio // Slow path marking the object `ref` when the GC is marking. The // entrypoint will be loaded by the slow path code. 
SlowPathCodeARMVIXL* slow_path = - new (GetGraph()->GetArena()) LoadReferenceWithBakerReadBarrierSlowPathARMVIXL( + new (GetGraph()->GetAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARMVIXL( instruction, ref, obj, offset, index, scale_factor, needs_null_check, temp_reg); AddSlowPath(slow_path); @@ -8738,8 +8741,8 @@ void CodeGeneratorARMVIXL::UpdateReferenceFieldWithBakerReadBarrier(HInstruction // Slow path updating the object reference at address `obj + field_offset` // when the GC is marking. The entrypoint will be loaded by the slow path code. - SlowPathCodeARMVIXL* slow_path = - new (GetGraph()->GetArena()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARMVIXL( + SlowPathCodeARMVIXL* slow_path = new (GetGraph()->GetAllocator()) + LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARMVIXL( instruction, ref, obj, @@ -8850,7 +8853,7 @@ void CodeGeneratorARMVIXL::GenerateReadBarrierSlow(HInstruction* instruction, // not used by the artReadBarrierSlow entry point. // // TODO: Unpoison `ref` when it is used by artReadBarrierSlow. - SlowPathCodeARMVIXL* slow_path = new (GetGraph()->GetArena()) + SlowPathCodeARMVIXL* slow_path = new (GetGraph()->GetAllocator()) ReadBarrierForHeapReferenceSlowPathARMVIXL(instruction, out, ref, obj, offset, index); AddSlowPath(slow_path); @@ -8886,7 +8889,7 @@ void CodeGeneratorARMVIXL::GenerateReadBarrierForRootSlow(HInstruction* instruct // Note that GC roots are not affected by heap poisoning, so we do // not need to do anything special for this here. 
SlowPathCodeARMVIXL* slow_path = - new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathARMVIXL(instruction, out, root); + new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathARMVIXL(instruction, out, root); AddSlowPath(slow_path); __ B(slow_path->GetEntryLabel()); @@ -9191,7 +9194,7 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateUint32Literal( void LocationsBuilderARMVIXL::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instr, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instr, LocationSummary::kNoCall); locations->SetInAt(HMultiplyAccumulate::kInputAccumulatorIndex, Location::RequiresRegister()); locations->SetInAt(HMultiplyAccumulate::kInputMulLeftIndex, Location::RequiresRegister()); @@ -9228,7 +9231,7 @@ void InstructionCodeGeneratorARMVIXL::VisitBoundType(HBoundType* instruction ATT // Simple implementation of packed switch - generate cascaded compare/jumps. void LocationsBuilderARMVIXL::VisitPackedSwitch(HPackedSwitch* switch_instr) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); if (switch_instr->GetNumEntries() > kPackedSwitchCompareJumpThreshold && codegen_->GetAssembler()->GetVIXLAssembler()->IsUsingT32()) { @@ -9342,7 +9345,7 @@ void CodeGeneratorARMVIXL::MoveFromReturnRegister(Location trg, DataType::Type t TODO_VIXL32(FATAL); } else { // Let the parallel move resolver take care of all of this. 
- HParallelMove parallel_move(GetGraph()->GetArena()); + HParallelMove parallel_move(GetGraph()->GetAllocator()); parallel_move.AddMove(return_loc, trg, type, nullptr); GetMoveResolver()->EmitNativeCode(¶llel_move); } @@ -9350,7 +9353,7 @@ void CodeGeneratorARMVIXL::MoveFromReturnRegister(Location trg, DataType::Type t void LocationsBuilderARMVIXL::VisitClassTableGet(HClassTableGet* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister()); } diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h index 58b85259e7..c46d17ccec 100644 --- a/compiler/optimizing/code_generator_arm_vixl.h +++ b/compiler/optimizing/code_generator_arm_vixl.h @@ -119,7 +119,7 @@ class JumpTableARMVIXL : public DeletableArenaObject<kArenaAllocSwitchTable> { explicit JumpTableARMVIXL(HPackedSwitch* switch_instr) : switch_instr_(switch_instr), table_start_(), - bb_addresses_(switch_instr->GetArena()->Adapter(kArenaAllocCodeGenerator)) { + bb_addresses_(switch_instr->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) { uint32_t num_entries = switch_instr_->GetNumEntries(); for (uint32_t i = 0; i < num_entries; i++) { VIXLInt32Literal *lit = new VIXLInt32Literal(0, vixl32::RawLiteral::kManuallyPlaced); @@ -739,7 +739,7 @@ class CodeGeneratorARMVIXL : public CodeGenerator { void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE; JumpTableARMVIXL* CreateJumpTable(HPackedSwitch* switch_instr) { - jump_tables_.emplace_back(new (GetGraph()->GetArena()) JumpTableARMVIXL(switch_instr)); + jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARMVIXL(switch_instr)); return jump_tables_.back().get(); } void EmitJumpTables(); diff --git 
a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc index 3c592e7e37..7ea7b9cee2 100644 --- a/compiler/optimizing/code_generator_mips.cc +++ b/compiler/optimizing/code_generator_mips.cc @@ -557,7 +557,7 @@ class ArraySetSlowPathMIPS : public SlowPathCodeMIPS { SaveLiveRegisters(codegen, locations); InvokeRuntimeCallingConvention calling_convention; - HParallelMove parallel_move(codegen->GetGraph()->GetArena()); + HParallelMove parallel_move(codegen->GetGraph()->GetAllocator()); parallel_move.AddMove( locations->InAt(0), Location::RegisterLocation(calling_convention.GetRegisterAt(0)), @@ -968,7 +968,7 @@ class ReadBarrierForHeapReferenceSlowPathMIPS : public SlowPathCodeMIPS { // We're moving two or three locations to locations that could // overlap, so we need a parallel move resolver. InvokeRuntimeCallingConvention calling_convention; - HParallelMove parallel_move(codegen->GetGraph()->GetArena()); + HParallelMove parallel_move(codegen->GetGraph()->GetAllocator()); parallel_move.AddMove(ref_, Location::RegisterLocation(calling_convention.GetRegisterAt(0)), DataType::Type::kReference, @@ -1100,19 +1100,19 @@ CodeGeneratorMIPS::CodeGeneratorMIPS(HGraph* graph, block_labels_(nullptr), location_builder_(graph, this), instruction_visitor_(graph, this), - move_resolver_(graph->GetArena(), this), - assembler_(graph->GetArena(), &isa_features), + move_resolver_(graph->GetAllocator(), this), + assembler_(graph->GetAllocator(), &isa_features), isa_features_(isa_features), uint32_literals_(std::less<uint32_t>(), - graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - 
pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - string_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), + graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + pc_relative_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + pc_relative_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + pc_relative_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + jit_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + jit_class_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), clobbered_ra_(false) { // Save RA (containing the return address) to mimic Quick. 
AddAllocatedRegister(Location::RegisterLocation(RA)); @@ -1998,7 +1998,7 @@ void InstructionCodeGeneratorMIPS::GenerateMemoryBarrier(MemBarrierKind kind ATT void InstructionCodeGeneratorMIPS::GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor) { SuspendCheckSlowPathMIPS* slow_path = - new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS(instruction, successor); + new (GetGraph()->GetAllocator()) SuspendCheckSlowPathMIPS(instruction, successor); codegen_->AddSlowPath(slow_path); __ LoadFromOffset(kLoadUnsignedHalfword, @@ -2023,7 +2023,7 @@ InstructionCodeGeneratorMIPS::InstructionCodeGeneratorMIPS(HGraph* graph, void LocationsBuilderMIPS::HandleBinaryOp(HBinaryOperation* instruction) { DCHECK_EQ(instruction->InputCount(), 2U); - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); DataType::Type type = instruction->GetResultType(); switch (type) { case DataType::Type::kInt32: { @@ -2289,7 +2289,7 @@ void InstructionCodeGeneratorMIPS::HandleBinaryOp(HBinaryOperation* instruction) void LocationsBuilderMIPS::HandleShift(HBinaryOperation* instr) { DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor()); - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr); DataType::Type type = instr->GetResultType(); switch (type) { case DataType::Type::kInt32: @@ -2542,10 +2542,10 @@ void LocationsBuilderMIPS::VisitArrayGet(HArrayGet* instruction) { bool object_array_get_with_read_barrier = kEmitCompilerReadBarrier && (type == DataType::Type::kReference); LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, - object_array_get_with_read_barrier - ? 
LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, + object_array_get_with_read_barrier + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall); if (object_array_get_with_read_barrier && kUseBakerReadBarrier) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } @@ -2824,7 +2824,7 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) { } void LocationsBuilderMIPS::VisitArrayLength(HArrayLength* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); } @@ -2868,7 +2868,7 @@ void LocationsBuilderMIPS::VisitArraySet(HArraySet* instruction) { CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue()); bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck(); - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary( + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( instruction, may_need_runtime_call_for_type_check ? 
LocationSummary::kCallOnSlowPath : @@ -2986,7 +2986,7 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) { SlowPathCodeMIPS* slow_path = nullptr; if (may_need_runtime_call_for_type_check) { - slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathMIPS(instruction); + slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathMIPS(instruction); codegen_->AddSlowPath(slow_path); if (instruction->GetValueCanBeNull()) { MipsLabel non_zero; @@ -3141,7 +3141,7 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) { void LocationsBuilderMIPS::VisitIntermediateArrayAddressIndex( HIntermediateArrayAddressIndex* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); HIntConstant* shift = instruction->GetShift()->AsIntConstant(); @@ -3171,7 +3171,7 @@ void LocationsBuilderMIPS::VisitBoundsCheck(HBoundsCheck* instruction) { void InstructionCodeGeneratorMIPS::VisitBoundsCheck(HBoundsCheck* instruction) { LocationSummary* locations = instruction->GetLocations(); BoundsCheckSlowPathMIPS* slow_path = - new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS(instruction); + new (GetGraph()->GetAllocator()) BoundsCheckSlowPathMIPS(instruction); codegen_->AddSlowPath(slow_path); Register index = locations->InAt(0).AsRegister<Register>(); @@ -3222,7 +3222,8 @@ void LocationsBuilderMIPS::VisitCheckCast(HCheckCast* instruction) { break; } - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind)); @@ -3262,8 +3263,8 @@ void 
InstructionCodeGeneratorMIPS::VisitCheckCast(HCheckCast* instruction) { !instruction->CanThrowIntoCatchBlock(); } SlowPathCodeMIPS* slow_path = - new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction, - is_type_check_slow_path_fatal); + new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS(instruction, + is_type_check_slow_path_fatal); codegen_->AddSlowPath(slow_path); // Avoid this check if we know `obj` is not null. @@ -3417,7 +3418,7 @@ void InstructionCodeGeneratorMIPS::VisitCheckCast(HCheckCast* instruction) { void LocationsBuilderMIPS::VisitClinitCheck(HClinitCheck* check) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath); + new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath); locations->SetInAt(0, Location::RequiresRegister()); if (check->HasUses()) { locations->SetOut(Location::SameAsFirstInput()); @@ -3426,7 +3427,7 @@ void LocationsBuilderMIPS::VisitClinitCheck(HClinitCheck* check) { void InstructionCodeGeneratorMIPS::VisitClinitCheck(HClinitCheck* check) { // We assume the class is not null. 
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS( + SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS( check->GetLoadClass(), check, check->GetDexPc(), @@ -3440,7 +3441,7 @@ void LocationsBuilderMIPS::VisitCompare(HCompare* compare) { DataType::Type in_type = compare->InputAt(0)->GetType(); LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(compare, LocationSummary::kNoCall); switch (in_type) { case DataType::Type::kBool: @@ -3601,7 +3602,7 @@ void InstructionCodeGeneratorMIPS::VisitCompare(HCompare* instruction) { } void LocationsBuilderMIPS::HandleCondition(HCondition* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); switch (instruction->InputAt(0)->GetType()) { default: case DataType::Type::kInt64: @@ -3815,7 +3816,7 @@ void LocationsBuilderMIPS::VisitDiv(HDiv* div) { ? 
LocationSummary::kCallOnMainOnly : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(div, call_kind); switch (type) { case DataType::Type::kInt32: @@ -3882,7 +3883,8 @@ void LocationsBuilderMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) { } void InstructionCodeGeneratorMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) { - SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS(instruction); + SlowPathCodeMIPS* slow_path = + new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathMIPS(instruction); codegen_->AddSlowPath(slow_path); Location value = instruction->GetLocations()->InAt(0); DataType::Type type = instruction->GetType(); @@ -3929,7 +3931,7 @@ void InstructionCodeGeneratorMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) void LocationsBuilderMIPS::VisitDoubleConstant(HDoubleConstant* constant) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } @@ -3946,7 +3948,7 @@ void InstructionCodeGeneratorMIPS::VisitExit(HExit* exit ATTRIBUTE_UNUSED) { void LocationsBuilderMIPS::VisitFloatConstant(HFloatConstant* constant) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } @@ -5526,7 +5528,7 @@ void InstructionCodeGeneratorMIPS::GenerateTestAndBranch(HInstruction* instructi } void LocationsBuilderMIPS::VisitIf(HIf* if_instr) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr); + LocationSummary* locations = new (GetGraph()->GetAllocator()) 
LocationSummary(if_instr); if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) { locations->SetInAt(0, Location::RequiresRegister()); } @@ -5543,7 +5545,7 @@ void InstructionCodeGeneratorMIPS::VisitIf(HIf* if_instr) { } void LocationsBuilderMIPS::VisitDeoptimize(HDeoptimize* deoptimize) { - LocationSummary* locations = new (GetGraph()->GetArena()) + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath); InvokeRuntimeCallingConvention calling_convention; RegisterSet caller_saves = RegisterSet::Empty(); @@ -6098,7 +6100,7 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR6(HSelect* select) { } void LocationsBuilderMIPS::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) { - LocationSummary* locations = new (GetGraph()->GetArena()) + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(flag, LocationSummary::kNoCall); locations->SetOut(Location::RequiresRegister()); } @@ -6111,7 +6113,7 @@ void InstructionCodeGeneratorMIPS::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFl } void LocationsBuilderMIPS::VisitSelect(HSelect* select) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select); CanMoveConditionally(select, codegen_->GetInstructionSetFeatures().IsR6(), locations); } @@ -6136,7 +6138,7 @@ void InstructionCodeGeneratorMIPS::VisitSelect(HSelect* select) { } void LocationsBuilderMIPS::VisitNativeDebugInfo(HNativeDebugInfo* info) { - new (GetGraph()->GetArena()) LocationSummary(info); + new (GetGraph()->GetAllocator()) LocationSummary(info); } void InstructionCodeGeneratorMIPS::VisitNativeDebugInfo(HNativeDebugInfo*) { @@ -6153,7 +6155,7 @@ void LocationsBuilderMIPS::HandleFieldGet(HInstruction* instruction, const Field bool generate_volatile = field_info.IsVolatile() && is_wide; bool object_field_get_with_read_barrier = 
kEmitCompilerReadBarrier && (field_type == DataType::Type::kReference); - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary( + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( instruction, generate_volatile ? LocationSummary::kCallOnMainOnly @@ -6327,7 +6329,7 @@ void LocationsBuilderMIPS::HandleFieldSet(HInstruction* instruction, const Field DataType::Type field_type = field_info.GetFieldType(); bool is_wide = (field_type == DataType::Type::kInt64) || (field_type == DataType::Type::kFloat64); bool generate_volatile = field_info.IsVolatile() && is_wide; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary( + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( instruction, generate_volatile ? LocationSummary::kCallOnMainOnly : LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); @@ -6691,7 +6693,7 @@ void InstructionCodeGeneratorMIPS::GenerateGcRootFieldLoad(HInstruction* instruc // Slow path marking the GC root `root`. Location temp = Location::RegisterLocation(T9); SlowPathCodeMIPS* slow_path = - new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS( + new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS( instruction, root, /*entrypoint*/ temp); @@ -7018,14 +7020,14 @@ void CodeGeneratorMIPS::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* // to be null in this code path. 
DCHECK_EQ(offset, 0u); DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1); - slow_path = new (GetGraph()->GetArena()) + slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathMIPS(instruction, ref, obj, /* field_offset */ index, temp_reg); } else { - slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS(instruction, ref); + slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS(instruction, ref); } AddSlowPath(slow_path); @@ -7061,7 +7063,7 @@ void CodeGeneratorMIPS::GenerateReadBarrierSlow(HInstruction* instruction, // not used by the artReadBarrierSlow entry point. // // TODO: Unpoison `ref` when it is used by artReadBarrierSlow. - SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) + SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator()) ReadBarrierForHeapReferenceSlowPathMIPS(instruction, out, ref, obj, offset, index); AddSlowPath(slow_path); @@ -7097,7 +7099,7 @@ void CodeGeneratorMIPS::GenerateReadBarrierForRootSlow(HInstruction* instruction // Note that GC roots are not affected by heap poisoning, so we do // not need to do anything special for this here. SlowPathCodeMIPS* slow_path = - new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathMIPS(instruction, out, root); + new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathMIPS(instruction, out, root); AddSlowPath(slow_path); __ B(slow_path->GetEntryLabel()); @@ -7124,7 +7126,8 @@ void LocationsBuilderMIPS::VisitInstanceOf(HInstanceOf* instruction) { break; } - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind); if (baker_read_barrier_slow_path) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} @@ -7266,8 +7269,8 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) { maybe_temp_loc, kWithoutReadBarrier); DCHECK(locations->OnlyCallsOnSlowPath()); - slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction, - /* is_fatal */ false); + slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS(instruction, + /* is_fatal */ false); codegen_->AddSlowPath(slow_path); __ Bne(out, cls, slow_path->GetEntryLabel()); __ LoadConst32(out, 1); @@ -7295,8 +7298,8 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) { // call to the runtime not using a type checking slow path). // This should also be beneficial for the other cases above. DCHECK(locations->OnlyCallsOnSlowPath()); - slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction, - /* is_fatal */ false); + slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS(instruction, + /* is_fatal */ false); codegen_->AddSlowPath(slow_path); __ B(slow_path->GetEntryLabel()); break; @@ -7311,7 +7314,7 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) { } void LocationsBuilderMIPS::VisitIntConstant(HIntConstant* constant) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant); locations->SetOut(Location::ConstantLocation(constant)); } @@ -7320,7 +7323,7 @@ void InstructionCodeGeneratorMIPS::VisitIntConstant(HIntConstant* constant ATTRI } void LocationsBuilderMIPS::VisitNullConstant(HNullConstant* constant) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant); locations->SetOut(Location::ConstantLocation(constant)); } @@ -7661,7 +7664,7 @@ void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) { LocationSummary::CallKind call_kind = 
(cls->NeedsEnvironment() || requires_read_barrier) ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind); if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } @@ -7839,7 +7842,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF if (generate_null_check || cls->MustGenerateClinitCheck()) { DCHECK(cls->CanCallRuntime()); - SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS( + SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS( cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck(), bss_info_high); codegen_->AddSlowPath(slow_path); if (generate_null_check) { @@ -7859,7 +7862,7 @@ static int32_t GetExceptionTlsOffset() { void LocationsBuilderMIPS::VisitLoadException(HLoadException* load) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall); locations->SetOut(Location::RequiresRegister()); } @@ -7869,7 +7872,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadException(HLoadException* load) { } void LocationsBuilderMIPS::VisitClearException(HClearException* clear) { - new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall); } void InstructionCodeGeneratorMIPS::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) { @@ -7878,7 +7881,7 @@ void InstructionCodeGeneratorMIPS::VisitClearException(HClearException* clear AT void LocationsBuilderMIPS::VisitLoadString(HLoadString* load) { LocationSummary::CallKind 
call_kind = CodeGenerator::GetLoadStringCallKind(load); - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind); HLoadString::LoadKind load_kind = load->GetLoadKind(); const bool isR6 = codegen_->GetInstructionSetFeatures().IsR6(); const bool has_irreducible_loops = codegen_->GetGraph()->HasIrreducibleLoops(); @@ -8004,7 +8007,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_ kCompilerReadBarrierOption, &info_low->label); SlowPathCodeMIPS* slow_path = - new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load, info_high); + new (GetGraph()->GetAllocator()) LoadStringSlowPathMIPS(load, info_high); codegen_->AddSlowPath(slow_path); __ Beqz(out, slow_path->GetEntryLabel()); __ Bind(slow_path->GetExitLabel()); @@ -8041,7 +8044,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_ } void LocationsBuilderMIPS::VisitLongConstant(HLongConstant* constant) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant); locations->SetOut(Location::ConstantLocation(constant)); } @@ -8050,8 +8053,8 @@ void InstructionCodeGeneratorMIPS::VisitLongConstant(HLongConstant* constant ATT } void LocationsBuilderMIPS::VisitMonitorOperation(HMonitorOperation* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); } @@ -8068,7 +8071,7 @@ void InstructionCodeGeneratorMIPS::VisitMonitorOperation(HMonitorOperation* inst void 
LocationsBuilderMIPS::VisitMul(HMul* mul) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall); switch (mul->GetResultType()) { case DataType::Type::kInt32: case DataType::Type::kInt64: @@ -8163,7 +8166,7 @@ void InstructionCodeGeneratorMIPS::VisitMul(HMul* instruction) { void LocationsBuilderMIPS::VisitNeg(HNeg* neg) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall); switch (neg->GetResultType()) { case DataType::Type::kInt32: case DataType::Type::kInt64: @@ -8221,8 +8224,8 @@ void InstructionCodeGeneratorMIPS::VisitNeg(HNeg* instruction) { } void LocationsBuilderMIPS::VisitNewArray(HNewArray* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConvention calling_convention; locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference)); locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); @@ -8240,8 +8243,8 @@ void InstructionCodeGeneratorMIPS::VisitNewArray(HNewArray* instruction) { } void LocationsBuilderMIPS::VisitNewInstance(HNewInstance* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConvention calling_convention; if (instruction->IsStringAlloc()) { locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument)); @@ -8270,7 +8273,7 @@ void 
InstructionCodeGeneratorMIPS::VisitNewInstance(HNewInstance* instruction) { } void LocationsBuilderMIPS::VisitNot(HNot* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); } @@ -8303,7 +8306,7 @@ void InstructionCodeGeneratorMIPS::VisitNot(HNot* instruction) { } void LocationsBuilderMIPS::VisitBooleanNot(HBooleanNot* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); } @@ -8331,7 +8334,7 @@ void CodeGeneratorMIPS::GenerateImplicitNullCheck(HNullCheck* instruction) { } void CodeGeneratorMIPS::GenerateExplicitNullCheck(HNullCheck* instruction) { - SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS(instruction); + SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator()) NullCheckSlowPathMIPS(instruction); AddSlowPath(slow_path); Location obj = instruction->GetLocations()->InAt(0); @@ -8360,7 +8363,7 @@ void InstructionCodeGeneratorMIPS::VisitParallelMove(HParallelMove* instruction) } void LocationsBuilderMIPS::VisitParameterValue(HParameterValue* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); Location location = parameter_visitor_.GetNextLocation(instruction->GetType()); if (location.IsStackSlot()) { location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize()); @@ -8377,7 +8380,7 @@ void 
InstructionCodeGeneratorMIPS::VisitParameterValue(HParameterValue* instruct void LocationsBuilderMIPS::VisitCurrentMethod(HCurrentMethod* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument)); } @@ -8387,7 +8390,7 @@ void InstructionCodeGeneratorMIPS::VisitCurrentMethod(HCurrentMethod* instructio } void LocationsBuilderMIPS::VisitPhi(HPhi* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) { locations->SetInAt(i, Location::Any()); } @@ -8403,7 +8406,7 @@ void LocationsBuilderMIPS::VisitRem(HRem* rem) { LocationSummary::CallKind call_kind = (type == DataType::Type::kInt32) ? 
LocationSummary::kNoCall : LocationSummary::kCallOnMainOnly; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(rem, call_kind); switch (type) { case DataType::Type::kInt32: @@ -8481,7 +8484,7 @@ void InstructionCodeGeneratorMIPS::VisitMemoryBarrier(HMemoryBarrier* memory_bar } void LocationsBuilderMIPS::VisitReturn(HReturn* ret) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(ret); DataType::Type return_type = ret->InputAt(0)->GetType(); locations->SetInAt(0, MipsReturnLocation(return_type)); } @@ -8622,8 +8625,8 @@ void InstructionCodeGeneratorMIPS::VisitUnresolvedStaticFieldSet( } void LocationsBuilderMIPS::VisitSuspendCheck(HSuspendCheck* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnSlowPath); // In suspend check slow path, usually there are no caller-save registers at all. // If SIMD instructions are present, however, we force spilling all live SIMD // registers in full width (since the runtime only saves/restores lower part). 
@@ -8646,8 +8649,8 @@ void InstructionCodeGeneratorMIPS::VisitSuspendCheck(HSuspendCheck* instruction) } void LocationsBuilderMIPS::VisitThrow(HThrow* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); } @@ -8676,7 +8679,8 @@ void LocationsBuilderMIPS::VisitTypeConversion(HTypeConversion* conversion) { call_kind = LocationSummary::kCallOnMainOnly; } - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind); + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(conversion, call_kind); if (call_kind == LocationSummary::kNoCall) { if (DataType::IsFloatingPointType(input_type)) { @@ -9014,7 +9018,7 @@ void InstructionCodeGeneratorMIPS::VisitAboveOrEqual(HAboveOrEqual* comp) { void LocationsBuilderMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); } @@ -9123,7 +9127,7 @@ void InstructionCodeGeneratorMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr void LocationsBuilderMIPS::VisitMipsPackedSwitch(HMipsPackedSwitch* switch_instr) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); // Constant area pointer (HMipsComputeBaseMethodAddress). 
locations->SetInAt(1, Location::RequiresRegister()); @@ -9152,7 +9156,7 @@ void InstructionCodeGeneratorMIPS::VisitMipsPackedSwitch(HMipsPackedSwitch* swit void LocationsBuilderMIPS::VisitMipsComputeBaseMethodAddress( HMipsComputeBaseMethodAddress* insn) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(insn, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(insn, LocationSummary::kNoCall); locations->SetOut(Location::RequiresRegister()); } @@ -9185,7 +9189,7 @@ void InstructionCodeGeneratorMIPS::VisitInvokeUnresolved(HInvokeUnresolved* invo void LocationsBuilderMIPS::VisitClassTableGet(HClassTableGet* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister()); } diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc index 687700380b..fad0fe74e5 100644 --- a/compiler/optimizing/code_generator_mips64.cc +++ b/compiler/optimizing/code_generator_mips64.cc @@ -512,7 +512,7 @@ class ArraySetSlowPathMIPS64 : public SlowPathCodeMIPS64 { SaveLiveRegisters(codegen, locations); InvokeRuntimeCallingConvention calling_convention; - HParallelMove parallel_move(codegen->GetGraph()->GetArena()); + HParallelMove parallel_move(codegen->GetGraph()->GetAllocator()); parallel_move.AddMove( locations->InAt(0), Location::RegisterLocation(calling_convention.GetRegisterAt(0)), @@ -910,7 +910,7 @@ class ReadBarrierForHeapReferenceSlowPathMIPS64 : public SlowPathCodeMIPS64 { // We're moving two or three locations to locations that could // overlap, so we need a parallel move resolver. 
InvokeRuntimeCallingConvention calling_convention; - HParallelMove parallel_move(codegen->GetGraph()->GetArena()); + HParallelMove parallel_move(codegen->GetGraph()->GetAllocator()); parallel_move.AddMove(ref_, Location::RegisterLocation(calling_convention.GetRegisterAt(0)), DataType::Type::kReference, @@ -1041,23 +1041,23 @@ CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph, block_labels_(nullptr), location_builder_(graph, this), instruction_visitor_(graph, this), - move_resolver_(graph->GetArena(), this), - assembler_(graph->GetArena(), &isa_features), + move_resolver_(graph->GetAllocator(), this), + assembler_(graph->GetAllocator(), &isa_features), isa_features_(isa_features), uint32_literals_(std::less<uint32_t>(), - graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), + graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), uint64_literals_(std::less<uint64_t>(), - graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - string_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), + graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + pc_relative_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + pc_relative_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + pc_relative_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + 
string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), jit_string_patches_(StringReferenceValueComparator(), - graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), + graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), jit_class_patches_(TypeReferenceValueComparator(), - graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) { + graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) { // Save RA (containing the return address) to mimic Quick. AddAllocatedRegister(Location::RegisterLocation(RA)); } @@ -1835,7 +1835,7 @@ void InstructionCodeGeneratorMIPS64::GenerateMemoryBarrier(MemBarrierKind kind A void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor) { SuspendCheckSlowPathMIPS64* slow_path = - new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS64(instruction, successor); + new (GetGraph()->GetAllocator()) SuspendCheckSlowPathMIPS64(instruction, successor); codegen_->AddSlowPath(slow_path); __ LoadFromOffset(kLoadUnsignedHalfword, @@ -1860,7 +1860,7 @@ InstructionCodeGeneratorMIPS64::InstructionCodeGeneratorMIPS64(HGraph* graph, void LocationsBuilderMIPS64::HandleBinaryOp(HBinaryOperation* instruction) { DCHECK_EQ(instruction->InputCount(), 2U); - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); DataType::Type type = instruction->GetResultType(); switch (type) { case DataType::Type::kInt32: @@ -1990,7 +1990,7 @@ void InstructionCodeGeneratorMIPS64::HandleBinaryOp(HBinaryOperation* instructio void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) { DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor()); - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr); DataType::Type type = 
instr->GetResultType(); switch (type) { case DataType::Type::kInt32: @@ -2119,10 +2119,10 @@ void LocationsBuilderMIPS64::VisitArrayGet(HArrayGet* instruction) { bool object_array_get_with_read_barrier = kEmitCompilerReadBarrier && (type == DataType::Type::kReference); LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, - object_array_get_with_read_barrier - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, + object_array_get_with_read_barrier + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall); if (object_array_get_with_read_barrier && kUseBakerReadBarrier) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } @@ -2385,7 +2385,7 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) { } void LocationsBuilderMIPS64::VisitArrayLength(HArrayLength* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); } @@ -2429,7 +2429,7 @@ void LocationsBuilderMIPS64::VisitArraySet(HArraySet* instruction) { CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue()); bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck(); - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary( + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( instruction, may_need_runtime_call_for_type_check ? 
LocationSummary::kCallOnSlowPath : @@ -2543,7 +2543,7 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) { SlowPathCodeMIPS64* slow_path = nullptr; if (may_need_runtime_call_for_type_check) { - slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathMIPS64(instruction); + slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathMIPS64(instruction); codegen_->AddSlowPath(slow_path); if (instruction->GetValueCanBeNull()) { Mips64Label non_zero; @@ -2700,7 +2700,7 @@ void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) { void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) { LocationSummary* locations = instruction->GetLocations(); BoundsCheckSlowPathMIPS64* slow_path = - new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS64(instruction); + new (GetGraph()->GetAllocator()) BoundsCheckSlowPathMIPS64(instruction); codegen_->AddSlowPath(slow_path); GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>(); @@ -2751,7 +2751,8 @@ void LocationsBuilderMIPS64::VisitCheckCast(HCheckCast* instruction) { break; } - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind)); @@ -2791,8 +2792,8 @@ void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) { !instruction->CanThrowIntoCatchBlock(); } SlowPathCodeMIPS64* slow_path = - new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction, - is_type_check_slow_path_fatal); + new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS64(instruction, + is_type_check_slow_path_fatal); codegen_->AddSlowPath(slow_path); // Avoid this check if we know `obj` is not null. 
@@ -2946,7 +2947,7 @@ void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) { void LocationsBuilderMIPS64::VisitClinitCheck(HClinitCheck* check) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath); + new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath); locations->SetInAt(0, Location::RequiresRegister()); if (check->HasUses()) { locations->SetOut(Location::SameAsFirstInput()); @@ -2955,7 +2956,7 @@ void LocationsBuilderMIPS64::VisitClinitCheck(HClinitCheck* check) { void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) { // We assume the class is not null. - SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64( + SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS64( check->GetLoadClass(), check, check->GetDexPc(), @@ -2968,7 +2969,7 @@ void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) { void LocationsBuilderMIPS64::VisitCompare(HCompare* compare) { DataType::Type in_type = compare->InputAt(0)->GetType(); - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(compare); switch (in_type) { case DataType::Type::kBool: @@ -3088,7 +3089,7 @@ void InstructionCodeGeneratorMIPS64::VisitCompare(HCompare* instruction) { } void LocationsBuilderMIPS64::HandleCondition(HCondition* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); switch (instruction->InputAt(0)->GetType()) { default: case DataType::Type::kInt64: @@ -3376,7 +3377,7 @@ void InstructionCodeGeneratorMIPS64::GenerateDivRemIntegral(HBinaryOperation* in void LocationsBuilderMIPS64::VisitDiv(HDiv* div) { LocationSummary* 
locations = - new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(div, LocationSummary::kNoCall); switch (div->GetResultType()) { case DataType::Type::kInt32: case DataType::Type::kInt64: @@ -3429,7 +3430,7 @@ void LocationsBuilderMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) { void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) { SlowPathCodeMIPS64* slow_path = - new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS64(instruction); + new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathMIPS64(instruction); codegen_->AddSlowPath(slow_path); Location value = instruction->GetLocations()->InAt(0); @@ -3455,7 +3456,7 @@ void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instructio void LocationsBuilderMIPS64::VisitDoubleConstant(HDoubleConstant* constant) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } @@ -3472,7 +3473,7 @@ void InstructionCodeGeneratorMIPS64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) { void LocationsBuilderMIPS64::VisitFloatConstant(HFloatConstant* constant) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } @@ -4255,7 +4256,7 @@ void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruc } void LocationsBuilderMIPS64::VisitIf(HIf* if_instr) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr); if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) { 
locations->SetInAt(0, Location::RequiresRegister()); } @@ -4272,7 +4273,7 @@ void InstructionCodeGeneratorMIPS64::VisitIf(HIf* if_instr) { } void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) { - LocationSummary* locations = new (GetGraph()->GetArena()) + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath); InvokeRuntimeCallingConvention calling_convention; RegisterSet caller_saves = RegisterSet::Empty(); @@ -4594,7 +4595,7 @@ void InstructionCodeGeneratorMIPS64::GenConditionalMove(HSelect* select) { } void LocationsBuilderMIPS64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) { - LocationSummary* locations = new (GetGraph()->GetArena()) + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(flag, LocationSummary::kNoCall); locations->SetOut(Location::RequiresRegister()); } @@ -4607,7 +4608,7 @@ void InstructionCodeGeneratorMIPS64::VisitShouldDeoptimizeFlag(HShouldDeoptimize } void LocationsBuilderMIPS64::VisitSelect(HSelect* select) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select); CanMoveConditionally(select, locations); } @@ -4627,7 +4628,7 @@ void InstructionCodeGeneratorMIPS64::VisitSelect(HSelect* select) { } void LocationsBuilderMIPS64::VisitNativeDebugInfo(HNativeDebugInfo* info) { - new (GetGraph()->GetArena()) LocationSummary(info); + new (GetGraph()->GetAllocator()) LocationSummary(info); } void InstructionCodeGeneratorMIPS64::VisitNativeDebugInfo(HNativeDebugInfo*) { @@ -4643,7 +4644,7 @@ void LocationsBuilderMIPS64::HandleFieldGet(HInstruction* instruction, DataType::Type field_type = field_info.GetFieldType(); bool object_field_get_with_read_barrier = kEmitCompilerReadBarrier && (field_type == DataType::Type::kReference); - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary( + 
LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( instruction, object_field_get_with_read_barrier ? LocationSummary::kCallOnSlowPath @@ -4761,7 +4762,7 @@ void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction, void LocationsBuilderMIPS64::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info ATTRIBUTE_UNUSED) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); if (DataType::IsFloatingPointType(instruction->InputAt(1)->GetType())) { locations->SetInAt(1, FpuRegisterOrConstantForStore(instruction->InputAt(1))); @@ -5050,7 +5051,7 @@ void InstructionCodeGeneratorMIPS64::GenerateGcRootFieldLoad(HInstruction* instr // Slow path marking the GC root `root`. Location temp = Location::RegisterLocation(T9); SlowPathCodeMIPS64* slow_path = - new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS64( + new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS64( instruction, root, /*entrypoint*/ temp); @@ -5335,14 +5336,14 @@ void CodeGeneratorMIPS64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction // above are expected to be null in this code path. DCHECK_EQ(offset, 0u); DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1); - slow_path = new (GetGraph()->GetArena()) + slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathMIPS64(instruction, ref, obj, /* field_offset */ index, temp_reg); } else { - slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS64(instruction, ref); + slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS64(instruction, ref); } AddSlowPath(slow_path); @@ -5378,7 +5379,7 @@ void CodeGeneratorMIPS64::GenerateReadBarrierSlow(HInstruction* instruction, // not used by the artReadBarrierSlow entry point. 
// // TODO: Unpoison `ref` when it is used by artReadBarrierSlow. - SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) + SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetAllocator()) ReadBarrierForHeapReferenceSlowPathMIPS64(instruction, out, ref, obj, offset, index); AddSlowPath(slow_path); @@ -5414,7 +5415,7 @@ void CodeGeneratorMIPS64::GenerateReadBarrierForRootSlow(HInstruction* instructi // Note that GC roots are not affected by heap poisoning, so we do // not need to do anything special for this here. SlowPathCodeMIPS64* slow_path = - new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathMIPS64(instruction, out, root); + new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathMIPS64(instruction, out, root); AddSlowPath(slow_path); __ Bc(slow_path->GetEntryLabel()); @@ -5441,7 +5442,8 @@ void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) { break; } - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind); if (baker_read_barrier_slow_path) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } @@ -5583,8 +5585,8 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) { maybe_temp_loc, kWithoutReadBarrier); DCHECK(locations->OnlyCallsOnSlowPath()); - slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction, - /* is_fatal */ false); + slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS64(instruction, + /* is_fatal */ false); codegen_->AddSlowPath(slow_path); __ Bnec(out, cls, slow_path->GetEntryLabel()); __ LoadConst32(out, 1); @@ -5612,8 +5614,8 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) { // call to the runtime not using a type checking slow path). // This should also be beneficial for the other cases above. 
DCHECK(locations->OnlyCallsOnSlowPath()); - slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction, - /* is_fatal */ false); + slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS64(instruction, + /* is_fatal */ false); codegen_->AddSlowPath(slow_path); __ Bc(slow_path->GetEntryLabel()); break; @@ -5628,7 +5630,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) { } void LocationsBuilderMIPS64::VisitIntConstant(HIntConstant* constant) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant); locations->SetOut(Location::ConstantLocation(constant)); } @@ -5637,7 +5639,7 @@ void InstructionCodeGeneratorMIPS64::VisitIntConstant(HIntConstant* constant ATT } void LocationsBuilderMIPS64::VisitNullConstant(HNullConstant* constant) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant); locations->SetOut(Location::ConstantLocation(constant)); } @@ -5952,7 +5954,7 @@ void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) { LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier) ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind); if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} @@ -6081,7 +6083,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S if (generate_null_check || cls->MustGenerateClinitCheck()) { DCHECK(cls->CanCallRuntime()); - SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64( + SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS64( cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck(), bss_info_high); codegen_->AddSlowPath(slow_path); if (generate_null_check) { @@ -6101,7 +6103,7 @@ static int32_t GetExceptionTlsOffset() { void LocationsBuilderMIPS64::VisitLoadException(HLoadException* load) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall); locations->SetOut(Location::RequiresRegister()); } @@ -6111,7 +6113,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadException(HLoadException* load) { } void LocationsBuilderMIPS64::VisitClearException(HClearException* clear) { - new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall); } void InstructionCodeGeneratorMIPS64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) { @@ -6121,7 +6123,7 @@ void InstructionCodeGeneratorMIPS64::VisitClearException(HClearException* clear void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) { HLoadString::LoadKind load_kind = load->GetLoadKind(); LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load); - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind); if (load_kind == HLoadString::LoadKind::kRuntimeCall) { InvokeRuntimeCallingConvention calling_convention; 
locations->SetOut(Location::RegisterLocation(calling_convention.GetRegisterAt(0))); @@ -6199,7 +6201,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA kCompilerReadBarrierOption, &info_low->label); SlowPathCodeMIPS64* slow_path = - new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load, info_high); + new (GetGraph()->GetAllocator()) LoadStringSlowPathMIPS64(load, info_high); codegen_->AddSlowPath(slow_path); __ Beqzc(out, slow_path->GetEntryLabel()); __ Bind(slow_path->GetExitLabel()); @@ -6227,7 +6229,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA } void LocationsBuilderMIPS64::VisitLongConstant(HLongConstant* constant) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant); locations->SetOut(Location::ConstantLocation(constant)); } @@ -6236,8 +6238,8 @@ void InstructionCodeGeneratorMIPS64::VisitLongConstant(HLongConstant* constant A } void LocationsBuilderMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); } @@ -6255,7 +6257,7 @@ void InstructionCodeGeneratorMIPS64::VisitMonitorOperation(HMonitorOperation* in void LocationsBuilderMIPS64::VisitMul(HMul* mul) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall); switch (mul->GetResultType()) { case DataType::Type::kInt32: case DataType::Type::kInt64: @@ -6310,7 +6312,7 @@ void 
InstructionCodeGeneratorMIPS64::VisitMul(HMul* instruction) { void LocationsBuilderMIPS64::VisitNeg(HNeg* neg) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall); switch (neg->GetResultType()) { case DataType::Type::kInt32: case DataType::Type::kInt64: @@ -6360,8 +6362,8 @@ void InstructionCodeGeneratorMIPS64::VisitNeg(HNeg* instruction) { } void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConvention calling_convention; locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference)); locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); @@ -6379,8 +6381,8 @@ void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) { } void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConvention calling_convention; if (instruction->IsStringAlloc()) { locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument)); @@ -6410,7 +6412,7 @@ void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction) } void LocationsBuilderMIPS64::VisitNot(HNot* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); locations->SetInAt(0, 
Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); } @@ -6434,7 +6436,7 @@ void InstructionCodeGeneratorMIPS64::VisitNot(HNot* instruction) { } void LocationsBuilderMIPS64::VisitBooleanNot(HBooleanNot* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); } @@ -6462,7 +6464,8 @@ void CodeGeneratorMIPS64::GenerateImplicitNullCheck(HNullCheck* instruction) { } void CodeGeneratorMIPS64::GenerateExplicitNullCheck(HNullCheck* instruction) { - SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS64(instruction); + SlowPathCodeMIPS64* slow_path = + new (GetGraph()->GetAllocator()) NullCheckSlowPathMIPS64(instruction); AddSlowPath(slow_path); Location obj = instruction->GetLocations()->InAt(0); @@ -6491,7 +6494,7 @@ void InstructionCodeGeneratorMIPS64::VisitParallelMove(HParallelMove* instructio } void LocationsBuilderMIPS64::VisitParameterValue(HParameterValue* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); Location location = parameter_visitor_.GetNextLocation(instruction->GetType()); if (location.IsStackSlot()) { location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize()); @@ -6508,7 +6511,7 @@ void InstructionCodeGeneratorMIPS64::VisitParameterValue(HParameterValue* instru void LocationsBuilderMIPS64::VisitCurrentMethod(HCurrentMethod* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, 
LocationSummary::kNoCall); locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument)); } @@ -6518,7 +6521,7 @@ void InstructionCodeGeneratorMIPS64::VisitCurrentMethod(HCurrentMethod* instruct } void LocationsBuilderMIPS64::VisitPhi(HPhi* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) { locations->SetInAt(i, Location::Any()); } @@ -6534,7 +6537,7 @@ void LocationsBuilderMIPS64::VisitRem(HRem* rem) { LocationSummary::CallKind call_kind = DataType::IsFloatingPointType(type) ? LocationSummary::kCallOnMainOnly : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(rem, call_kind); switch (type) { case DataType::Type::kInt32: @@ -6602,7 +6605,7 @@ void InstructionCodeGeneratorMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_b } void LocationsBuilderMIPS64::VisitReturn(HReturn* ret) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(ret); DataType::Type return_type = ret->InputAt(0)->GetType(); locations->SetInAt(0, Mips64ReturnLocation(return_type)); } @@ -6736,8 +6739,8 @@ void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldSet( } void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnSlowPath); // In suspend check slow path, usually there are no caller-save registers at all. 
// If SIMD instructions are present, however, we force spilling all live SIMD // registers in full width (since the runtime only saves/restores lower part). @@ -6760,8 +6763,8 @@ void InstructionCodeGeneratorMIPS64::VisitSuspendCheck(HSuspendCheck* instructio } void LocationsBuilderMIPS64::VisitThrow(HThrow* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); } @@ -6782,7 +6785,7 @@ void LocationsBuilderMIPS64::VisitTypeConversion(HTypeConversion* conversion) { LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type; } - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(conversion); if (DataType::IsFloatingPointType(input_type)) { locations->SetInAt(0, Location::RequiresFpuRegister()); @@ -7014,7 +7017,7 @@ void InstructionCodeGeneratorMIPS64::VisitAboveOrEqual(HAboveOrEqual* comp) { // Simple implementation of packed switch - generate cascaded compare/jumps. 
void LocationsBuilderMIPS64::VisitPackedSwitch(HPackedSwitch* switch_instr) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); } @@ -7110,7 +7113,7 @@ void InstructionCodeGeneratorMIPS64::VisitPackedSwitch(HPackedSwitch* switch_ins void LocationsBuilderMIPS64::VisitClassTableGet(HClassTableGet* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister()); } diff --git a/compiler/optimizing/code_generator_vector_arm64.cc b/compiler/optimizing/code_generator_vector_arm64.cc index b2aec1e66d..10aced02c3 100644 --- a/compiler/optimizing/code_generator_vector_arm64.cc +++ b/compiler/optimizing/code_generator_vector_arm64.cc @@ -38,7 +38,7 @@ using helpers::XRegisterFrom; #define __ GetVIXLAssembler()-> void LocationsBuilderARM64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); HInstruction* input = instruction->InputAt(0); switch (instruction->GetPackedType()) { case DataType::Type::kBool: @@ -131,7 +131,7 @@ void InstructionCodeGeneratorARM64::VisitVecReplicateScalar(HVecReplicateScalar* } void LocationsBuilderARM64::VisitVecExtractScalar(HVecExtractScalar* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); switch (instruction->GetPackedType()) { case 
DataType::Type::kBool: case DataType::Type::kUint8: @@ -179,8 +179,8 @@ void InstructionCodeGeneratorARM64::VisitVecExtractScalar(HVecExtractScalar* ins } // Helper to set up locations for vector unary operations. -static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) { - LocationSummary* locations = new (arena) LocationSummary(instruction); +static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kBool: locations->SetInAt(0, Location::RequiresFpuRegister()); @@ -206,7 +206,7 @@ static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* in } void LocationsBuilderARM64::VisitVecReduce(HVecReduce* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARM64::VisitVecReduce(HVecReduce* instruction) { @@ -246,7 +246,7 @@ void InstructionCodeGeneratorARM64::VisitVecReduce(HVecReduce* instruction) { } void LocationsBuilderARM64::VisitVecCnv(HVecCnv* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARM64::VisitVecCnv(HVecCnv* instruction) { @@ -264,7 +264,7 @@ void InstructionCodeGeneratorARM64::VisitVecCnv(HVecCnv* instruction) { } void LocationsBuilderARM64::VisitVecNeg(HVecNeg* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARM64::VisitVecNeg(HVecNeg* instruction) { @@ -305,7 +305,7 @@ void InstructionCodeGeneratorARM64::VisitVecNeg(HVecNeg* instruction) { } void LocationsBuilderARM64::VisitVecAbs(HVecAbs* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), 
instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARM64::VisitVecAbs(HVecAbs* instruction) { @@ -344,7 +344,7 @@ void InstructionCodeGeneratorARM64::VisitVecAbs(HVecAbs* instruction) { } void LocationsBuilderARM64::VisitVecNot(HVecNot* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARM64::VisitVecNot(HVecNot* instruction) { @@ -372,8 +372,8 @@ void InstructionCodeGeneratorARM64::VisitVecNot(HVecNot* instruction) { } // Helper to set up locations for vector binary operations. -static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) { - LocationSummary* locations = new (arena) LocationSummary(instruction); +static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kBool: case DataType::Type::kUint8: @@ -395,7 +395,7 @@ static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* } void LocationsBuilderARM64::VisitVecAdd(HVecAdd* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARM64::VisitVecAdd(HVecAdd* instruction) { @@ -437,7 +437,7 @@ void InstructionCodeGeneratorARM64::VisitVecAdd(HVecAdd* instruction) { } void LocationsBuilderARM64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARM64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) { @@ -477,7 +477,7 @@ void InstructionCodeGeneratorARM64::VisitVecHalvingAdd(HVecHalvingAdd* instructi } void 
LocationsBuilderARM64::VisitVecSub(HVecSub* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARM64::VisitVecSub(HVecSub* instruction) { @@ -519,7 +519,7 @@ void InstructionCodeGeneratorARM64::VisitVecSub(HVecSub* instruction) { } void LocationsBuilderARM64::VisitVecMul(HVecMul* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARM64::VisitVecMul(HVecMul* instruction) { @@ -557,7 +557,7 @@ void InstructionCodeGeneratorARM64::VisitVecMul(HVecMul* instruction) { } void LocationsBuilderARM64::VisitVecDiv(HVecDiv* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARM64::VisitVecDiv(HVecDiv* instruction) { @@ -581,7 +581,7 @@ void InstructionCodeGeneratorARM64::VisitVecDiv(HVecDiv* instruction) { } void LocationsBuilderARM64::VisitVecMin(HVecMin* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARM64::VisitVecMin(HVecMin* instruction) { @@ -631,7 +631,7 @@ void InstructionCodeGeneratorARM64::VisitVecMin(HVecMin* instruction) { } void LocationsBuilderARM64::VisitVecMax(HVecMax* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARM64::VisitVecMax(HVecMax* instruction) { @@ -682,7 +682,7 @@ void InstructionCodeGeneratorARM64::VisitVecMax(HVecMax* instruction) { void LocationsBuilderARM64::VisitVecAnd(HVecAnd* instruction) { // TODO: Allow constants supported by BIC (vector, immediate). 
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARM64::VisitVecAnd(HVecAnd* instruction) { @@ -718,7 +718,7 @@ void InstructionCodeGeneratorARM64::VisitVecAndNot(HVecAndNot* instruction) { } void LocationsBuilderARM64::VisitVecOr(HVecOr* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARM64::VisitVecOr(HVecOr* instruction) { @@ -745,7 +745,7 @@ void InstructionCodeGeneratorARM64::VisitVecOr(HVecOr* instruction) { } void LocationsBuilderARM64::VisitVecXor(HVecXor* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARM64::VisitVecXor(HVecXor* instruction) { @@ -772,8 +772,8 @@ void InstructionCodeGeneratorARM64::VisitVecXor(HVecXor* instruction) { } // Helper to set up locations for vector shift operations. 
-static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) { - LocationSummary* locations = new (arena) LocationSummary(instruction); +static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kUint8: case DataType::Type::kInt8: @@ -792,7 +792,7 @@ static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* } void LocationsBuilderARM64::VisitVecShl(HVecShl* instruction) { - CreateVecShiftLocations(GetGraph()->GetArena(), instruction); + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARM64::VisitVecShl(HVecShl* instruction) { @@ -826,7 +826,7 @@ void InstructionCodeGeneratorARM64::VisitVecShl(HVecShl* instruction) { } void LocationsBuilderARM64::VisitVecShr(HVecShr* instruction) { - CreateVecShiftLocations(GetGraph()->GetArena(), instruction); + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARM64::VisitVecShr(HVecShr* instruction) { @@ -860,7 +860,7 @@ void InstructionCodeGeneratorARM64::VisitVecShr(HVecShr* instruction) { } void LocationsBuilderARM64::VisitVecUShr(HVecUShr* instruction) { - CreateVecShiftLocations(GetGraph()->GetArena(), instruction); + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARM64::VisitVecUShr(HVecUShr* instruction) { @@ -894,7 +894,7 @@ void InstructionCodeGeneratorARM64::VisitVecUShr(HVecUShr* instruction) { } void LocationsBuilderARM64::VisitVecSetScalars(HVecSetScalars* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented @@ -967,8 
+967,8 @@ void InstructionCodeGeneratorARM64::VisitVecSetScalars(HVecSetScalars* instructi } // Helper to set up locations for vector accumulations. -static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) { - LocationSummary* locations = new (arena) LocationSummary(instruction); +static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kUint8: case DataType::Type::kInt8: @@ -988,7 +988,7 @@ static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instru } void LocationsBuilderARM64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) { - CreateVecAccumLocations(GetGraph()->GetArena(), instruction); + CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction); } // Some early revisions of the Cortex-A53 have an erratum (835769) whereby it is possible for a @@ -1036,7 +1036,7 @@ void InstructionCodeGeneratorARM64::VisitVecMultiplyAccumulate(HVecMultiplyAccum } void LocationsBuilderARM64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) { - CreateVecAccumLocations(GetGraph()->GetArena(), instruction); + CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction); // Some conversions require temporary registers. LocationSummary* locations = instruction->GetLocations(); HVecOperation* a = instruction->InputAt(1)->AsVecOperation(); @@ -1216,10 +1216,10 @@ void InstructionCodeGeneratorARM64::VisitVecSADAccumulate(HVecSADAccumulate* ins } // Helper to set up locations for vector memory operations. 
-static void CreateVecMemLocations(ArenaAllocator* arena, +static void CreateVecMemLocations(ArenaAllocator* allocator, HVecMemoryOperation* instruction, bool is_load) { - LocationSummary* locations = new (arena) LocationSummary(instruction); + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kBool: case DataType::Type::kUint8: @@ -1281,7 +1281,7 @@ MemOperand InstructionCodeGeneratorARM64::VecAddress( } void LocationsBuilderARM64::VisitVecLoad(HVecLoad* instruction) { - CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ true); + CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ true); } void InstructionCodeGeneratorARM64::VisitVecLoad(HVecLoad* instruction) { @@ -1339,7 +1339,7 @@ void InstructionCodeGeneratorARM64::VisitVecLoad(HVecLoad* instruction) { } void LocationsBuilderARM64::VisitVecStore(HVecStore* instruction) { - CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ false); + CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ false); } void InstructionCodeGeneratorARM64::VisitVecStore(HVecStore* instruction) { diff --git a/compiler/optimizing/code_generator_vector_arm_vixl.cc b/compiler/optimizing/code_generator_vector_arm_vixl.cc index df757524a1..f84408da49 100644 --- a/compiler/optimizing/code_generator_vector_arm_vixl.cc +++ b/compiler/optimizing/code_generator_vector_arm_vixl.cc @@ -33,7 +33,7 @@ using helpers::RegisterFrom; #define __ GetVIXLAssembler()-> void LocationsBuilderARMVIXL::VisitVecReplicateScalar(HVecReplicateScalar* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kBool: case DataType::Type::kUint8: @@ -84,8 +84,8 @@ void 
InstructionCodeGeneratorARMVIXL::VisitVecExtractScalar(HVecExtractScalar* i } // Helper to set up locations for vector unary operations. -static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) { - LocationSummary* locations = new (arena) LocationSummary(instruction); +static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kBool: locations->SetInAt(0, Location::RequiresFpuRegister()); @@ -108,7 +108,7 @@ static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* in } void LocationsBuilderARMVIXL::VisitVecReduce(HVecReduce* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARMVIXL::VisitVecReduce(HVecReduce* instruction) { @@ -116,7 +116,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecReduce(HVecReduce* instruction) { } void LocationsBuilderARMVIXL::VisitVecCnv(HVecCnv* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARMVIXL::VisitVecCnv(HVecCnv* instruction) { @@ -124,7 +124,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecCnv(HVecCnv* instruction) { } void LocationsBuilderARMVIXL::VisitVecNeg(HVecNeg* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARMVIXL::VisitVecNeg(HVecNeg* instruction) { @@ -153,7 +153,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecNeg(HVecNeg* instruction) { } void LocationsBuilderARMVIXL::VisitVecAbs(HVecAbs* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + 
CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARMVIXL::VisitVecAbs(HVecAbs* instruction) { @@ -180,7 +180,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecAbs(HVecAbs* instruction) { } void LocationsBuilderARMVIXL::VisitVecNot(HVecNot* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARMVIXL::VisitVecNot(HVecNot* instruction) { @@ -207,8 +207,8 @@ void InstructionCodeGeneratorARMVIXL::VisitVecNot(HVecNot* instruction) { } // Helper to set up locations for vector binary operations. -static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) { - LocationSummary* locations = new (arena) LocationSummary(instruction); +static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kBool: case DataType::Type::kUint8: @@ -227,7 +227,7 @@ static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* } void LocationsBuilderARMVIXL::VisitVecAdd(HVecAdd* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARMVIXL::VisitVecAdd(HVecAdd* instruction) { @@ -257,7 +257,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecAdd(HVecAdd* instruction) { } void LocationsBuilderARMVIXL::VisitVecHalvingAdd(HVecHalvingAdd* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARMVIXL::VisitVecHalvingAdd(HVecHalvingAdd* instruction) { @@ -297,7 +297,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecHalvingAdd(HVecHalvingAdd* instruc } void 
LocationsBuilderARMVIXL::VisitVecSub(HVecSub* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARMVIXL::VisitVecSub(HVecSub* instruction) { @@ -327,7 +327,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecSub(HVecSub* instruction) { } void LocationsBuilderARMVIXL::VisitVecMul(HVecMul* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARMVIXL::VisitVecMul(HVecMul* instruction) { @@ -357,7 +357,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecMul(HVecMul* instruction) { } void LocationsBuilderARMVIXL::VisitVecDiv(HVecDiv* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARMVIXL::VisitVecDiv(HVecDiv* instruction) { @@ -365,7 +365,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecDiv(HVecDiv* instruction) { } void LocationsBuilderARMVIXL::VisitVecMin(HVecMin* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARMVIXL::VisitVecMin(HVecMin* instruction) { @@ -405,7 +405,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecMin(HVecMin* instruction) { } void LocationsBuilderARMVIXL::VisitVecMax(HVecMax* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARMVIXL::VisitVecMax(HVecMax* instruction) { @@ -446,7 +446,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecMax(HVecMax* instruction) { void LocationsBuilderARMVIXL::VisitVecAnd(HVecAnd* instruction) { // TODO: Allow constants supported by VAND (immediate). 
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARMVIXL::VisitVecAnd(HVecAnd* instruction) { @@ -470,7 +470,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecAnd(HVecAnd* instruction) { } void LocationsBuilderARMVIXL::VisitVecAndNot(HVecAndNot* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARMVIXL::VisitVecAndNot(HVecAndNot* instruction) { @@ -478,7 +478,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecAndNot(HVecAndNot* instruction) { } void LocationsBuilderARMVIXL::VisitVecOr(HVecOr* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARMVIXL::VisitVecOr(HVecOr* instruction) { @@ -502,7 +502,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecOr(HVecOr* instruction) { } void LocationsBuilderARMVIXL::VisitVecXor(HVecXor* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARMVIXL::VisitVecXor(HVecXor* instruction) { @@ -526,8 +526,8 @@ void InstructionCodeGeneratorARMVIXL::VisitVecXor(HVecXor* instruction) { } // Helper to set up locations for vector shift operations. 
-static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) { - LocationSummary* locations = new (arena) LocationSummary(instruction); +static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kUint8: case DataType::Type::kInt8: @@ -545,7 +545,7 @@ static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* } void LocationsBuilderARMVIXL::VisitVecShl(HVecShl* instruction) { - CreateVecShiftLocations(GetGraph()->GetArena(), instruction); + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARMVIXL::VisitVecShl(HVecShl* instruction) { @@ -575,7 +575,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecShl(HVecShl* instruction) { } void LocationsBuilderARMVIXL::VisitVecShr(HVecShr* instruction) { - CreateVecShiftLocations(GetGraph()->GetArena(), instruction); + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARMVIXL::VisitVecShr(HVecShr* instruction) { @@ -605,7 +605,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecShr(HVecShr* instruction) { } void LocationsBuilderARMVIXL::VisitVecUShr(HVecUShr* instruction) { - CreateVecShiftLocations(GetGraph()->GetArena(), instruction); + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARMVIXL::VisitVecUShr(HVecUShr* instruction) { @@ -643,8 +643,8 @@ void InstructionCodeGeneratorARMVIXL::VisitVecSetScalars(HVecSetScalars* instruc } // Helper to set up locations for vector accumulations. 
-static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) { - LocationSummary* locations = new (arena) LocationSummary(instruction); +static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kUint8: case DataType::Type::kInt8: @@ -664,7 +664,7 @@ static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instru } void LocationsBuilderARMVIXL::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) { - CreateVecAccumLocations(GetGraph()->GetArena(), instruction); + CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARMVIXL::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) { @@ -672,7 +672,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecMultiplyAccumulate(HVecMultiplyAcc } void LocationsBuilderARMVIXL::VisitVecSADAccumulate(HVecSADAccumulate* instruction) { - CreateVecAccumLocations(GetGraph()->GetArena(), instruction); + CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorARMVIXL::VisitVecSADAccumulate(HVecSADAccumulate* instruction) { @@ -686,10 +686,10 @@ static bool IsWordAligned(HVecMemoryOperation* instruction) { } // Helper to set up locations for vector memory operations. 
-static void CreateVecMemLocations(ArenaAllocator* arena, +static void CreateVecMemLocations(ArenaAllocator* allocator, HVecMemoryOperation* instruction, bool is_load) { - LocationSummary* locations = new (arena) LocationSummary(instruction); + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kBool: case DataType::Type::kUint8: @@ -767,7 +767,7 @@ AlignedMemOperand InstructionCodeGeneratorARMVIXL::VecAddressUnaligned( } void LocationsBuilderARMVIXL::VisitVecLoad(HVecLoad* instruction) { - CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ true); + CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ true); } void InstructionCodeGeneratorARMVIXL::VisitVecLoad(HVecLoad* instruction) { @@ -818,7 +818,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecLoad(HVecLoad* instruction) { } void LocationsBuilderARMVIXL::VisitVecStore(HVecStore* instruction) { - CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ false); + CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ false); } void InstructionCodeGeneratorARMVIXL::VisitVecStore(HVecStore* instruction) { diff --git a/compiler/optimizing/code_generator_vector_mips.cc b/compiler/optimizing/code_generator_vector_mips.cc index e8c515761c..c5a39ff882 100644 --- a/compiler/optimizing/code_generator_vector_mips.cc +++ b/compiler/optimizing/code_generator_vector_mips.cc @@ -24,7 +24,7 @@ namespace mips { #define __ down_cast<MipsAssembler*>(GetAssembler())-> // NOLINT void LocationsBuilderMIPS::VisitVecReplicateScalar(HVecReplicateScalar* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kBool: case DataType::Type::kUint8: @@ -99,8 +99,8 @@ void 
InstructionCodeGeneratorMIPS::VisitVecExtractScalar(HVecExtractScalar* inst } // Helper to set up locations for vector unary operations. -static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) { - LocationSummary* locations = new (arena) LocationSummary(instruction); +static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kBool: locations->SetInAt(0, Location::RequiresFpuRegister()); @@ -129,7 +129,7 @@ static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* in } void LocationsBuilderMIPS::VisitVecReduce(HVecReduce* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS::VisitVecReduce(HVecReduce* instruction) { @@ -137,7 +137,7 @@ void InstructionCodeGeneratorMIPS::VisitVecReduce(HVecReduce* instruction) { } void LocationsBuilderMIPS::VisitVecCnv(HVecCnv* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS::VisitVecCnv(HVecCnv* instruction) { @@ -155,7 +155,7 @@ void InstructionCodeGeneratorMIPS::VisitVecCnv(HVecCnv* instruction) { } void LocationsBuilderMIPS::VisitVecNeg(HVecNeg* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS::VisitVecNeg(HVecNeg* instruction) { @@ -202,7 +202,7 @@ void InstructionCodeGeneratorMIPS::VisitVecNeg(HVecNeg* instruction) { } void LocationsBuilderMIPS::VisitVecAbs(HVecAbs* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); } 
void InstructionCodeGeneratorMIPS::VisitVecAbs(HVecAbs* instruction) { @@ -249,7 +249,7 @@ void InstructionCodeGeneratorMIPS::VisitVecAbs(HVecAbs* instruction) { } void LocationsBuilderMIPS::VisitVecNot(HVecNot* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS::VisitVecNot(HVecNot* instruction) { @@ -281,8 +281,8 @@ void InstructionCodeGeneratorMIPS::VisitVecNot(HVecNot* instruction) { } // Helper to set up locations for vector binary operations. -static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) { - LocationSummary* locations = new (arena) LocationSummary(instruction); +static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kBool: case DataType::Type::kUint8: @@ -304,7 +304,7 @@ static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* } void LocationsBuilderMIPS::VisitVecAdd(HVecAdd* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS::VisitVecAdd(HVecAdd* instruction) { @@ -346,7 +346,7 @@ void InstructionCodeGeneratorMIPS::VisitVecAdd(HVecAdd* instruction) { } void LocationsBuilderMIPS::VisitVecHalvingAdd(HVecHalvingAdd* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS::VisitVecHalvingAdd(HVecHalvingAdd* instruction) { @@ -386,7 +386,7 @@ void InstructionCodeGeneratorMIPS::VisitVecHalvingAdd(HVecHalvingAdd* instructio } void LocationsBuilderMIPS::VisitVecSub(HVecSub* instruction) { - 
CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS::VisitVecSub(HVecSub* instruction) { @@ -428,7 +428,7 @@ void InstructionCodeGeneratorMIPS::VisitVecSub(HVecSub* instruction) { } void LocationsBuilderMIPS::VisitVecMul(HVecMul* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS::VisitVecMul(HVecMul* instruction) { @@ -470,7 +470,7 @@ void InstructionCodeGeneratorMIPS::VisitVecMul(HVecMul* instruction) { } void LocationsBuilderMIPS::VisitVecDiv(HVecDiv* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS::VisitVecDiv(HVecDiv* instruction) { @@ -494,7 +494,7 @@ void InstructionCodeGeneratorMIPS::VisitVecDiv(HVecDiv* instruction) { } void LocationsBuilderMIPS::VisitVecMin(HVecMin* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS::VisitVecMin(HVecMin* instruction) { @@ -554,7 +554,7 @@ void InstructionCodeGeneratorMIPS::VisitVecMin(HVecMin* instruction) { } void LocationsBuilderMIPS::VisitVecMax(HVecMax* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS::VisitVecMax(HVecMax* instruction) { @@ -614,7 +614,7 @@ void InstructionCodeGeneratorMIPS::VisitVecMax(HVecMax* instruction) { } void LocationsBuilderMIPS::VisitVecAnd(HVecAnd* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS::VisitVecAnd(HVecAnd* 
instruction) { @@ -643,7 +643,7 @@ void InstructionCodeGeneratorMIPS::VisitVecAnd(HVecAnd* instruction) { } void LocationsBuilderMIPS::VisitVecAndNot(HVecAndNot* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS::VisitVecAndNot(HVecAndNot* instruction) { @@ -651,7 +651,7 @@ void InstructionCodeGeneratorMIPS::VisitVecAndNot(HVecAndNot* instruction) { } void LocationsBuilderMIPS::VisitVecOr(HVecOr* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS::VisitVecOr(HVecOr* instruction) { @@ -680,7 +680,7 @@ void InstructionCodeGeneratorMIPS::VisitVecOr(HVecOr* instruction) { } void LocationsBuilderMIPS::VisitVecXor(HVecXor* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS::VisitVecXor(HVecXor* instruction) { @@ -709,8 +709,8 @@ void InstructionCodeGeneratorMIPS::VisitVecXor(HVecXor* instruction) { } // Helper to set up locations for vector shift operations. 
-static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) { - LocationSummary* locations = new (arena) LocationSummary(instruction); +static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kUint8: case DataType::Type::kInt8: @@ -729,7 +729,7 @@ static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* } void LocationsBuilderMIPS::VisitVecShl(HVecShl* instruction) { - CreateVecShiftLocations(GetGraph()->GetArena(), instruction); + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS::VisitVecShl(HVecShl* instruction) { @@ -763,7 +763,7 @@ void InstructionCodeGeneratorMIPS::VisitVecShl(HVecShl* instruction) { } void LocationsBuilderMIPS::VisitVecShr(HVecShr* instruction) { - CreateVecShiftLocations(GetGraph()->GetArena(), instruction); + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS::VisitVecShr(HVecShr* instruction) { @@ -797,7 +797,7 @@ void InstructionCodeGeneratorMIPS::VisitVecShr(HVecShr* instruction) { } void LocationsBuilderMIPS::VisitVecUShr(HVecUShr* instruction) { - CreateVecShiftLocations(GetGraph()->GetArena(), instruction); + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS::VisitVecUShr(HVecUShr* instruction) { @@ -839,8 +839,8 @@ void InstructionCodeGeneratorMIPS::VisitVecSetScalars(HVecSetScalars* instructio } // Helper to set up locations for vector accumulations. 
-static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) { - LocationSummary* locations = new (arena) LocationSummary(instruction); +static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kUint8: case DataType::Type::kInt8: @@ -860,7 +860,7 @@ static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instru } void LocationsBuilderMIPS::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) { - CreateVecAccumLocations(GetGraph()->GetArena(), instruction); + CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) { @@ -910,7 +910,7 @@ void InstructionCodeGeneratorMIPS::VisitVecMultiplyAccumulate(HVecMultiplyAccumu } void LocationsBuilderMIPS::VisitVecSADAccumulate(HVecSADAccumulate* instruction) { - CreateVecAccumLocations(GetGraph()->GetArena(), instruction); + CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS::VisitVecSADAccumulate(HVecSADAccumulate* instruction) { @@ -919,10 +919,10 @@ void InstructionCodeGeneratorMIPS::VisitVecSADAccumulate(HVecSADAccumulate* inst } // Helper to set up locations for vector memory operations. 
-static void CreateVecMemLocations(ArenaAllocator* arena, +static void CreateVecMemLocations(ArenaAllocator* allocator, HVecMemoryOperation* instruction, bool is_load) { - LocationSummary* locations = new (arena) LocationSummary(instruction); + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kBool: case DataType::Type::kUint8: @@ -980,7 +980,7 @@ int32_t InstructionCodeGeneratorMIPS::VecAddress(LocationSummary* locations, } void LocationsBuilderMIPS::VisitVecLoad(HVecLoad* instruction) { - CreateVecMemLocations(GetGraph()->GetArena(), instruction, /* is_load */ true); + CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ true); } void InstructionCodeGeneratorMIPS::VisitVecLoad(HVecLoad* instruction) { @@ -1023,7 +1023,7 @@ void InstructionCodeGeneratorMIPS::VisitVecLoad(HVecLoad* instruction) { } void LocationsBuilderMIPS::VisitVecStore(HVecStore* instruction) { - CreateVecMemLocations(GetGraph()->GetArena(), instruction, /* is_load */ false); + CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ false); } void InstructionCodeGeneratorMIPS::VisitVecStore(HVecStore* instruction) { diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc index 7d69773ae6..e606df2158 100644 --- a/compiler/optimizing/code_generator_vector_mips64.cc +++ b/compiler/optimizing/code_generator_vector_mips64.cc @@ -29,7 +29,7 @@ VectorRegister VectorRegisterFrom(Location location) { } void LocationsBuilderMIPS64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kBool: case DataType::Type::kUint8: @@ -102,8 +102,8 @@ void 
InstructionCodeGeneratorMIPS64::VisitVecExtractScalar(HVecExtractScalar* in } // Helper to set up locations for vector unary operations. -static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) { - LocationSummary* locations = new (arena) LocationSummary(instruction); +static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kBool: locations->SetInAt(0, Location::RequiresFpuRegister()); @@ -132,7 +132,7 @@ static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* in } void LocationsBuilderMIPS64::VisitVecReduce(HVecReduce* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS64::VisitVecReduce(HVecReduce* instruction) { @@ -140,7 +140,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecReduce(HVecReduce* instruction) { } void LocationsBuilderMIPS64::VisitVecCnv(HVecCnv* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS64::VisitVecCnv(HVecCnv* instruction) { @@ -159,7 +159,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecCnv(HVecCnv* instruction) { } void LocationsBuilderMIPS64::VisitVecNeg(HVecNeg* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS64::VisitVecNeg(HVecNeg* instruction) { @@ -206,7 +206,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecNeg(HVecNeg* instruction) { } void LocationsBuilderMIPS64::VisitVecAbs(HVecAbs* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), 
instruction); } void InstructionCodeGeneratorMIPS64::VisitVecAbs(HVecAbs* instruction) { @@ -253,7 +253,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecAbs(HVecAbs* instruction) { } void LocationsBuilderMIPS64::VisitVecNot(HVecNot* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS64::VisitVecNot(HVecNot* instruction) { @@ -285,8 +285,8 @@ void InstructionCodeGeneratorMIPS64::VisitVecNot(HVecNot* instruction) { } // Helper to set up locations for vector binary operations. -static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) { - LocationSummary* locations = new (arena) LocationSummary(instruction); +static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kBool: case DataType::Type::kUint8: @@ -308,7 +308,7 @@ static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* } void LocationsBuilderMIPS64::VisitVecAdd(HVecAdd* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS64::VisitVecAdd(HVecAdd* instruction) { @@ -350,7 +350,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecAdd(HVecAdd* instruction) { } void LocationsBuilderMIPS64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) { @@ -390,7 +390,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecHalvingAdd(HVecHalvingAdd* instruct } void LocationsBuilderMIPS64::VisitVecSub(HVecSub* instruction) { - 
CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS64::VisitVecSub(HVecSub* instruction) { @@ -432,7 +432,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecSub(HVecSub* instruction) { } void LocationsBuilderMIPS64::VisitVecMul(HVecMul* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS64::VisitVecMul(HVecMul* instruction) { @@ -474,7 +474,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecMul(HVecMul* instruction) { } void LocationsBuilderMIPS64::VisitVecDiv(HVecDiv* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS64::VisitVecDiv(HVecDiv* instruction) { @@ -498,7 +498,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecDiv(HVecDiv* instruction) { } void LocationsBuilderMIPS64::VisitVecMin(HVecMin* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS64::VisitVecMin(HVecMin* instruction) { @@ -558,7 +558,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecMin(HVecMin* instruction) { } void LocationsBuilderMIPS64::VisitVecMax(HVecMax* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS64::VisitVecMax(HVecMax* instruction) { @@ -618,7 +618,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecMax(HVecMax* instruction) { } void LocationsBuilderMIPS64::VisitVecAnd(HVecAnd* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void 
InstructionCodeGeneratorMIPS64::VisitVecAnd(HVecAnd* instruction) { @@ -647,7 +647,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecAnd(HVecAnd* instruction) { } void LocationsBuilderMIPS64::VisitVecAndNot(HVecAndNot* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS64::VisitVecAndNot(HVecAndNot* instruction) { @@ -655,7 +655,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecAndNot(HVecAndNot* instruction) { } void LocationsBuilderMIPS64::VisitVecOr(HVecOr* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS64::VisitVecOr(HVecOr* instruction) { @@ -684,7 +684,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecOr(HVecOr* instruction) { } void LocationsBuilderMIPS64::VisitVecXor(HVecXor* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS64::VisitVecXor(HVecXor* instruction) { @@ -713,8 +713,8 @@ void InstructionCodeGeneratorMIPS64::VisitVecXor(HVecXor* instruction) { } // Helper to set up locations for vector shift operations. 
-static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) { - LocationSummary* locations = new (arena) LocationSummary(instruction); +static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kUint8: case DataType::Type::kInt8: @@ -733,7 +733,7 @@ static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* } void LocationsBuilderMIPS64::VisitVecShl(HVecShl* instruction) { - CreateVecShiftLocations(GetGraph()->GetArena(), instruction); + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS64::VisitVecShl(HVecShl* instruction) { @@ -767,7 +767,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecShl(HVecShl* instruction) { } void LocationsBuilderMIPS64::VisitVecShr(HVecShr* instruction) { - CreateVecShiftLocations(GetGraph()->GetArena(), instruction); + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS64::VisitVecShr(HVecShr* instruction) { @@ -801,7 +801,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecShr(HVecShr* instruction) { } void LocationsBuilderMIPS64::VisitVecUShr(HVecUShr* instruction) { - CreateVecShiftLocations(GetGraph()->GetArena(), instruction); + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS64::VisitVecUShr(HVecUShr* instruction) { @@ -843,8 +843,8 @@ void InstructionCodeGeneratorMIPS64::VisitVecSetScalars(HVecSetScalars* instruct } // Helper to set up locations for vector accumulations. 
-static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) { - LocationSummary* locations = new (arena) LocationSummary(instruction); +static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kUint8: case DataType::Type::kInt8: @@ -864,7 +864,7 @@ static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instru } void LocationsBuilderMIPS64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) { - CreateVecAccumLocations(GetGraph()->GetArena(), instruction); + CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) { @@ -914,7 +914,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecMultiplyAccumulate(HVecMultiplyAccu } void LocationsBuilderMIPS64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) { - CreateVecAccumLocations(GetGraph()->GetArena(), instruction); + CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorMIPS64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) { @@ -923,10 +923,10 @@ void InstructionCodeGeneratorMIPS64::VisitVecSADAccumulate(HVecSADAccumulate* in } // Helper to set up locations for vector memory operations. 
-static void CreateVecMemLocations(ArenaAllocator* arena, +static void CreateVecMemLocations(ArenaAllocator* allocator, HVecMemoryOperation* instruction, bool is_load) { - LocationSummary* locations = new (arena) LocationSummary(instruction); + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kBool: case DataType::Type::kUint8: @@ -984,7 +984,7 @@ int32_t InstructionCodeGeneratorMIPS64::VecAddress(LocationSummary* locations, } void LocationsBuilderMIPS64::VisitVecLoad(HVecLoad* instruction) { - CreateVecMemLocations(GetGraph()->GetArena(), instruction, /* is_load */ true); + CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ true); } void InstructionCodeGeneratorMIPS64::VisitVecLoad(HVecLoad* instruction) { @@ -1027,7 +1027,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecLoad(HVecLoad* instruction) { } void LocationsBuilderMIPS64::VisitVecStore(HVecStore* instruction) { - CreateVecMemLocations(GetGraph()->GetArena(), instruction, /* is_load */ false); + CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ false); } void InstructionCodeGeneratorMIPS64::VisitVecStore(HVecStore* instruction) { diff --git a/compiler/optimizing/code_generator_vector_x86.cc b/compiler/optimizing/code_generator_vector_x86.cc index a2ef1b1be9..ad8128a5b1 100644 --- a/compiler/optimizing/code_generator_vector_x86.cc +++ b/compiler/optimizing/code_generator_vector_x86.cc @@ -26,7 +26,7 @@ namespace x86 { #define __ down_cast<X86Assembler*>(GetAssembler())-> // NOLINT void LocationsBuilderX86::VisitVecReplicateScalar(HVecReplicateScalar* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); HInstruction* input = instruction->InputAt(0); bool is_zero = IsZeroBitPattern(input); switch (instruction->GetPackedType()) 
{ @@ -117,7 +117,7 @@ void InstructionCodeGeneratorX86::VisitVecReplicateScalar(HVecReplicateScalar* i } void LocationsBuilderX86::VisitVecExtractScalar(HVecExtractScalar* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kInt64: // Long needs extra temporary to store into the register pair. @@ -180,8 +180,8 @@ void InstructionCodeGeneratorX86::VisitVecExtractScalar(HVecExtractScalar* instr } // Helper to set up locations for vector unary operations. -static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) { - LocationSummary* locations = new (arena) LocationSummary(instruction); +static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kBool: case DataType::Type::kUint8: @@ -202,7 +202,7 @@ static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* in } void LocationsBuilderX86::VisitVecReduce(HVecReduce* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); // Long reduction or min/max require a temporary. 
if (instruction->GetPackedType() == DataType::Type::kInt64 || instruction->GetKind() == HVecReduce::kMin || @@ -269,7 +269,7 @@ void InstructionCodeGeneratorX86::VisitVecReduce(HVecReduce* instruction) { } void LocationsBuilderX86::VisitVecCnv(HVecCnv* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86::VisitVecCnv(HVecCnv* instruction) { @@ -287,7 +287,7 @@ void InstructionCodeGeneratorX86::VisitVecCnv(HVecCnv* instruction) { } void LocationsBuilderX86::VisitVecNeg(HVecNeg* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86::VisitVecNeg(HVecNeg* instruction) { @@ -334,7 +334,7 @@ void InstructionCodeGeneratorX86::VisitVecNeg(HVecNeg* instruction) { } void LocationsBuilderX86::VisitVecAbs(HVecAbs* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); // Integral-abs requires a temporary for the comparison. if (instruction->GetPackedType() == DataType::Type::kInt32) { instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister()); @@ -375,7 +375,7 @@ void InstructionCodeGeneratorX86::VisitVecAbs(HVecAbs* instruction) { } void LocationsBuilderX86::VisitVecNot(HVecNot* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); // Boolean-not requires a temporary to construct the 16 x one. if (instruction->GetPackedType() == DataType::Type::kBool) { instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister()); @@ -424,8 +424,8 @@ void InstructionCodeGeneratorX86::VisitVecNot(HVecNot* instruction) { } // Helper to set up locations for vector binary operations. 
-static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) { - LocationSummary* locations = new (arena) LocationSummary(instruction); +static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kBool: case DataType::Type::kUint8: @@ -447,7 +447,7 @@ static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* } void LocationsBuilderX86::VisitVecAdd(HVecAdd* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86::VisitVecAdd(HVecAdd* instruction) { @@ -489,7 +489,7 @@ void InstructionCodeGeneratorX86::VisitVecAdd(HVecAdd* instruction) { } void LocationsBuilderX86::VisitVecHalvingAdd(HVecHalvingAdd* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86::VisitVecHalvingAdd(HVecHalvingAdd* instruction) { @@ -516,7 +516,7 @@ void InstructionCodeGeneratorX86::VisitVecHalvingAdd(HVecHalvingAdd* instruction } void LocationsBuilderX86::VisitVecSub(HVecSub* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86::VisitVecSub(HVecSub* instruction) { @@ -558,7 +558,7 @@ void InstructionCodeGeneratorX86::VisitVecSub(HVecSub* instruction) { } void LocationsBuilderX86::VisitVecMul(HVecMul* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86::VisitVecMul(HVecMul* instruction) { @@ -591,7 +591,7 @@ void InstructionCodeGeneratorX86::VisitVecMul(HVecMul* 
instruction) { } void LocationsBuilderX86::VisitVecDiv(HVecDiv* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86::VisitVecDiv(HVecDiv* instruction) { @@ -615,7 +615,7 @@ void InstructionCodeGeneratorX86::VisitVecDiv(HVecDiv* instruction) { } void LocationsBuilderX86::VisitVecMin(HVecMin* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86::VisitVecMin(HVecMin* instruction) { @@ -666,7 +666,7 @@ void InstructionCodeGeneratorX86::VisitVecMin(HVecMin* instruction) { } void LocationsBuilderX86::VisitVecMax(HVecMax* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86::VisitVecMax(HVecMax* instruction) { @@ -717,7 +717,7 @@ void InstructionCodeGeneratorX86::VisitVecMax(HVecMax* instruction) { } void LocationsBuilderX86::VisitVecAnd(HVecAnd* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86::VisitVecAnd(HVecAnd* instruction) { @@ -752,7 +752,7 @@ void InstructionCodeGeneratorX86::VisitVecAnd(HVecAnd* instruction) { } void LocationsBuilderX86::VisitVecAndNot(HVecAndNot* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86::VisitVecAndNot(HVecAndNot* instruction) { @@ -787,7 +787,7 @@ void InstructionCodeGeneratorX86::VisitVecAndNot(HVecAndNot* instruction) { } void LocationsBuilderX86::VisitVecOr(HVecOr* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + 
CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86::VisitVecOr(HVecOr* instruction) { @@ -822,7 +822,7 @@ void InstructionCodeGeneratorX86::VisitVecOr(HVecOr* instruction) { } void LocationsBuilderX86::VisitVecXor(HVecXor* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86::VisitVecXor(HVecXor* instruction) { @@ -857,8 +857,8 @@ void InstructionCodeGeneratorX86::VisitVecXor(HVecXor* instruction) { } // Helper to set up locations for vector shift operations. -static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) { - LocationSummary* locations = new (arena) LocationSummary(instruction); +static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kUint16: case DataType::Type::kInt16: @@ -875,7 +875,7 @@ static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* } void LocationsBuilderX86::VisitVecShl(HVecShl* instruction) { - CreateVecShiftLocations(GetGraph()->GetArena(), instruction); + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86::VisitVecShl(HVecShl* instruction) { @@ -904,7 +904,7 @@ void InstructionCodeGeneratorX86::VisitVecShl(HVecShl* instruction) { } void LocationsBuilderX86::VisitVecShr(HVecShr* instruction) { - CreateVecShiftLocations(GetGraph()->GetArena(), instruction); + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86::VisitVecShr(HVecShr* instruction) { @@ -929,7 +929,7 @@ void InstructionCodeGeneratorX86::VisitVecShr(HVecShr* instruction) { } void LocationsBuilderX86::VisitVecUShr(HVecUShr* instruction) { - 
CreateVecShiftLocations(GetGraph()->GetArena(), instruction); + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86::VisitVecUShr(HVecUShr* instruction) { @@ -958,7 +958,7 @@ void InstructionCodeGeneratorX86::VisitVecUShr(HVecUShr* instruction) { } void LocationsBuilderX86::VisitVecSetScalars(HVecSetScalars* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented @@ -1045,8 +1045,8 @@ void InstructionCodeGeneratorX86::VisitVecSetScalars(HVecSetScalars* instruction } // Helper to set up locations for vector accumulations. -static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) { - LocationSummary* locations = new (arena) LocationSummary(instruction); +static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kUint8: case DataType::Type::kInt8: @@ -1066,7 +1066,7 @@ static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instru } void LocationsBuilderX86::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) { - CreateVecAccumLocations(GetGraph()->GetArena(), instruction); + CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) { @@ -1075,7 +1075,7 @@ void InstructionCodeGeneratorX86::VisitVecMultiplyAccumulate(HVecMultiplyAccumul } void LocationsBuilderX86::VisitVecSADAccumulate(HVecSADAccumulate* instruction) { - CreateVecAccumLocations(GetGraph()->GetArena(), instruction); + CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction); } void 
InstructionCodeGeneratorX86::VisitVecSADAccumulate(HVecSADAccumulate* instruction) { @@ -1084,10 +1084,10 @@ void InstructionCodeGeneratorX86::VisitVecSADAccumulate(HVecSADAccumulate* instr } // Helper to set up locations for vector memory operations. -static void CreateVecMemLocations(ArenaAllocator* arena, +static void CreateVecMemLocations(ArenaAllocator* allocator, HVecMemoryOperation* instruction, bool is_load) { - LocationSummary* locations = new (arena) LocationSummary(instruction); + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kBool: case DataType::Type::kUint8: @@ -1131,7 +1131,7 @@ static Address VecAddress(LocationSummary* locations, size_t size, bool is_strin } void LocationsBuilderX86::VisitVecLoad(HVecLoad* instruction) { - CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ true); + CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ true); // String load requires a temporary for the compressed load. 
if (mirror::kUseStringCompression && instruction->IsStringCharAt()) { instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister()); @@ -1194,7 +1194,7 @@ void InstructionCodeGeneratorX86::VisitVecLoad(HVecLoad* instruction) { } void LocationsBuilderX86::VisitVecStore(HVecStore* instruction) { - CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ false); + CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ false); } void InstructionCodeGeneratorX86::VisitVecStore(HVecStore* instruction) { diff --git a/compiler/optimizing/code_generator_vector_x86_64.cc b/compiler/optimizing/code_generator_vector_x86_64.cc index 2270f6b9c8..107030e6c2 100644 --- a/compiler/optimizing/code_generator_vector_x86_64.cc +++ b/compiler/optimizing/code_generator_vector_x86_64.cc @@ -26,7 +26,7 @@ namespace x86_64 { #define __ down_cast<X86_64Assembler*>(GetAssembler())-> // NOLINT void LocationsBuilderX86_64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); HInstruction* input = instruction->InputAt(0); bool is_zero = IsZeroBitPattern(input); switch (instruction->GetPackedType()) { @@ -108,7 +108,7 @@ void InstructionCodeGeneratorX86_64::VisitVecReplicateScalar(HVecReplicateScalar } void LocationsBuilderX86_64::VisitVecExtractScalar(HVecExtractScalar* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kBool: case DataType::Type::kUint8: @@ -163,8 +163,8 @@ void InstructionCodeGeneratorX86_64::VisitVecExtractScalar(HVecExtractScalar* in } // Helper to set up locations for vector unary operations. 
-static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) { - LocationSummary* locations = new (arena) LocationSummary(instruction); +static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kBool: case DataType::Type::kUint8: @@ -185,7 +185,7 @@ static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* in } void LocationsBuilderX86_64::VisitVecReduce(HVecReduce* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); // Long reduction or min/max require a temporary. if (instruction->GetPackedType() == DataType::Type::kInt64 || instruction->GetKind() == HVecReduce::kMin || @@ -252,7 +252,7 @@ void InstructionCodeGeneratorX86_64::VisitVecReduce(HVecReduce* instruction) { } void LocationsBuilderX86_64::VisitVecCnv(HVecCnv* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86_64::VisitVecCnv(HVecCnv* instruction) { @@ -270,7 +270,7 @@ void InstructionCodeGeneratorX86_64::VisitVecCnv(HVecCnv* instruction) { } void LocationsBuilderX86_64::VisitVecNeg(HVecNeg* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86_64::VisitVecNeg(HVecNeg* instruction) { @@ -317,7 +317,7 @@ void InstructionCodeGeneratorX86_64::VisitVecNeg(HVecNeg* instruction) { } void LocationsBuilderX86_64::VisitVecAbs(HVecAbs* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); // Integral-abs requires a temporary for the comparison. 
if (instruction->GetPackedType() == DataType::Type::kInt32) { instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister()); @@ -358,7 +358,7 @@ void InstructionCodeGeneratorX86_64::VisitVecAbs(HVecAbs* instruction) { } void LocationsBuilderX86_64::VisitVecNot(HVecNot* instruction) { - CreateVecUnOpLocations(GetGraph()->GetArena(), instruction); + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); // Boolean-not requires a temporary to construct the 16 x one. if (instruction->GetPackedType() == DataType::Type::kBool) { instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister()); @@ -407,8 +407,8 @@ void InstructionCodeGeneratorX86_64::VisitVecNot(HVecNot* instruction) { } // Helper to set up locations for vector binary operations. -static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) { - LocationSummary* locations = new (arena) LocationSummary(instruction); +static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kBool: case DataType::Type::kUint8: @@ -430,7 +430,7 @@ static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* } void LocationsBuilderX86_64::VisitVecAdd(HVecAdd* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86_64::VisitVecAdd(HVecAdd* instruction) { @@ -472,7 +472,7 @@ void InstructionCodeGeneratorX86_64::VisitVecAdd(HVecAdd* instruction) { } void LocationsBuilderX86_64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86_64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) { @@ -499,7 
+499,7 @@ void InstructionCodeGeneratorX86_64::VisitVecHalvingAdd(HVecHalvingAdd* instruct } void LocationsBuilderX86_64::VisitVecSub(HVecSub* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86_64::VisitVecSub(HVecSub* instruction) { @@ -541,7 +541,7 @@ void InstructionCodeGeneratorX86_64::VisitVecSub(HVecSub* instruction) { } void LocationsBuilderX86_64::VisitVecMul(HVecMul* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86_64::VisitVecMul(HVecMul* instruction) { @@ -574,7 +574,7 @@ void InstructionCodeGeneratorX86_64::VisitVecMul(HVecMul* instruction) { } void LocationsBuilderX86_64::VisitVecDiv(HVecDiv* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86_64::VisitVecDiv(HVecDiv* instruction) { @@ -598,7 +598,7 @@ void InstructionCodeGeneratorX86_64::VisitVecDiv(HVecDiv* instruction) { } void LocationsBuilderX86_64::VisitVecMin(HVecMin* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86_64::VisitVecMin(HVecMin* instruction) { @@ -649,7 +649,7 @@ void InstructionCodeGeneratorX86_64::VisitVecMin(HVecMin* instruction) { } void LocationsBuilderX86_64::VisitVecMax(HVecMax* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86_64::VisitVecMax(HVecMax* instruction) { @@ -700,7 +700,7 @@ void InstructionCodeGeneratorX86_64::VisitVecMax(HVecMax* instruction) { } void LocationsBuilderX86_64::VisitVecAnd(HVecAnd* instruction) { - 
CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86_64::VisitVecAnd(HVecAnd* instruction) { @@ -735,7 +735,7 @@ void InstructionCodeGeneratorX86_64::VisitVecAnd(HVecAnd* instruction) { } void LocationsBuilderX86_64::VisitVecAndNot(HVecAndNot* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86_64::VisitVecAndNot(HVecAndNot* instruction) { @@ -770,7 +770,7 @@ void InstructionCodeGeneratorX86_64::VisitVecAndNot(HVecAndNot* instruction) { } void LocationsBuilderX86_64::VisitVecOr(HVecOr* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86_64::VisitVecOr(HVecOr* instruction) { @@ -805,7 +805,7 @@ void InstructionCodeGeneratorX86_64::VisitVecOr(HVecOr* instruction) { } void LocationsBuilderX86_64::VisitVecXor(HVecXor* instruction) { - CreateVecBinOpLocations(GetGraph()->GetArena(), instruction); + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86_64::VisitVecXor(HVecXor* instruction) { @@ -840,8 +840,8 @@ void InstructionCodeGeneratorX86_64::VisitVecXor(HVecXor* instruction) { } // Helper to set up locations for vector shift operations. 
-static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) { - LocationSummary* locations = new (arena) LocationSummary(instruction); +static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kUint16: case DataType::Type::kInt16: @@ -858,7 +858,7 @@ static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* } void LocationsBuilderX86_64::VisitVecShl(HVecShl* instruction) { - CreateVecShiftLocations(GetGraph()->GetArena(), instruction); + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86_64::VisitVecShl(HVecShl* instruction) { @@ -887,7 +887,7 @@ void InstructionCodeGeneratorX86_64::VisitVecShl(HVecShl* instruction) { } void LocationsBuilderX86_64::VisitVecShr(HVecShr* instruction) { - CreateVecShiftLocations(GetGraph()->GetArena(), instruction); + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86_64::VisitVecShr(HVecShr* instruction) { @@ -912,7 +912,7 @@ void InstructionCodeGeneratorX86_64::VisitVecShr(HVecShr* instruction) { } void LocationsBuilderX86_64::VisitVecUShr(HVecUShr* instruction) { - CreateVecShiftLocations(GetGraph()->GetArena(), instruction); + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86_64::VisitVecUShr(HVecUShr* instruction) { @@ -941,7 +941,7 @@ void InstructionCodeGeneratorX86_64::VisitVecUShr(HVecUShr* instruction) { } void LocationsBuilderX86_64::VisitVecSetScalars(HVecSetScalars* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented 
@@ -1018,8 +1018,8 @@ void InstructionCodeGeneratorX86_64::VisitVecSetScalars(HVecSetScalars* instruct } // Helper to set up locations for vector accumulations. -static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) { - LocationSummary* locations = new (arena) LocationSummary(instruction); +static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kUint8: case DataType::Type::kInt8: @@ -1039,7 +1039,7 @@ static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instru } void LocationsBuilderX86_64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) { - CreateVecAccumLocations(GetGraph()->GetArena(), instruction); + CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86_64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) { @@ -1048,7 +1048,7 @@ void InstructionCodeGeneratorX86_64::VisitVecMultiplyAccumulate(HVecMultiplyAccu } void LocationsBuilderX86_64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) { - CreateVecAccumLocations(GetGraph()->GetArena(), instruction); + CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction); } void InstructionCodeGeneratorX86_64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) { @@ -1057,10 +1057,10 @@ void InstructionCodeGeneratorX86_64::VisitVecSADAccumulate(HVecSADAccumulate* in } // Helper to set up locations for vector memory operations. 
-static void CreateVecMemLocations(ArenaAllocator* arena, +static void CreateVecMemLocations(ArenaAllocator* allocator, HVecMemoryOperation* instruction, bool is_load) { - LocationSummary* locations = new (arena) LocationSummary(instruction); + LocationSummary* locations = new (allocator) LocationSummary(instruction); switch (instruction->GetPackedType()) { case DataType::Type::kBool: case DataType::Type::kUint8: @@ -1104,7 +1104,7 @@ static Address VecAddress(LocationSummary* locations, size_t size, bool is_strin } void LocationsBuilderX86_64::VisitVecLoad(HVecLoad* instruction) { - CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ true); + CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ true); // String load requires a temporary for the compressed load. if (mirror::kUseStringCompression && instruction->IsStringCharAt()) { instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister()); @@ -1167,7 +1167,7 @@ void InstructionCodeGeneratorX86_64::VisitVecLoad(HVecLoad* instruction) { } void LocationsBuilderX86_64::VisitVecStore(HVecStore* instruction) { - CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ false); + CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ false); } void InstructionCodeGeneratorX86_64::VisitVecStore(HVecStore* instruction) { diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index 35156491e8..d8a47fa1ea 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -414,7 +414,7 @@ class ArraySetSlowPathX86 : public SlowPathCode { SaveLiveRegisters(codegen, locations); InvokeRuntimeCallingConvention calling_convention; - HParallelMove parallel_move(codegen->GetGraph()->GetArena()); + HParallelMove parallel_move(codegen->GetGraph()->GetAllocator()); parallel_move.AddMove( locations->InAt(0), Location::RegisterLocation(calling_convention.GetRegisterAt(0)), 
@@ -811,7 +811,7 @@ class ReadBarrierForHeapReferenceSlowPathX86 : public SlowPathCode { // We're moving two or three locations to locations that could // overlap, so we need a parallel move resolver. InvokeRuntimeCallingConvention calling_convention; - HParallelMove parallel_move(codegen->GetGraph()->GetArena()); + HParallelMove parallel_move(codegen->GetGraph()->GetAllocator()); parallel_move.AddMove(ref_, Location::RegisterLocation(calling_convention.GetRegisterAt(0)), DataType::Type::kReference, @@ -1030,21 +1030,21 @@ CodeGeneratorX86::CodeGeneratorX86(HGraph* graph, block_labels_(nullptr), location_builder_(graph, this), instruction_visitor_(graph, this), - move_resolver_(graph->GetArena(), this), - assembler_(graph->GetArena()), + move_resolver_(graph->GetAllocator(), this), + assembler_(graph->GetAllocator()), isa_features_(isa_features), - boot_image_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - boot_image_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - string_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), + boot_image_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + boot_image_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + 
string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + jit_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + jit_class_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), constant_area_start_(-1), - fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), + fixups_to_jump_tables_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), method_address_offset_(std::less<uint32_t>(), - graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) { + graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) { // Use a fake return address register to mimic Quick. AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister)); } @@ -1333,7 +1333,7 @@ void CodeGeneratorX86::MoveConstant(Location location, int32_t value) { } void CodeGeneratorX86::MoveLocation(Location dst, Location src, DataType::Type dst_type) { - HParallelMove move(GetGraph()->GetArena()); + HParallelMove move(GetGraph()->GetAllocator()); if (dst_type == DataType::Type::kInt64 && !src.IsConstant() && !src.IsFpuRegister()) { move.AddMove(src.ToLow(), dst.ToLow(), DataType::Type::kInt32, nullptr); move.AddMove(src.ToHigh(), dst.ToHigh(), DataType::Type::kInt32, nullptr); @@ -1681,7 +1681,7 @@ void InstructionCodeGeneratorX86::GenerateTestAndBranch(HInstruction* instructio } void LocationsBuilderX86::VisitIf(HIf* if_instr) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr); if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) { locations->SetInAt(0, Location::Any()); } @@ -1698,7 +1698,7 @@ void InstructionCodeGeneratorX86::VisitIf(HIf* if_instr) { } void LocationsBuilderX86::VisitDeoptimize(HDeoptimize* deoptimize) { - LocationSummary* locations = new (GetGraph()->GetArena()) + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(deoptimize, 
LocationSummary::kCallOnSlowPath); InvokeRuntimeCallingConvention calling_convention; RegisterSet caller_saves = RegisterSet::Empty(); @@ -1718,7 +1718,7 @@ void InstructionCodeGeneratorX86::VisitDeoptimize(HDeoptimize* deoptimize) { } void LocationsBuilderX86::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) { - LocationSummary* locations = new (GetGraph()->GetArena()) + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(flag, LocationSummary::kNoCall); locations->SetOut(Location::RequiresRegister()); } @@ -1750,7 +1750,7 @@ static bool SelectCanUseCMOV(HSelect* select) { } void LocationsBuilderX86::VisitSelect(HSelect* select) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select); if (DataType::IsFloatingPointType(select->GetType())) { locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::Any()); @@ -1844,7 +1844,7 @@ void InstructionCodeGeneratorX86::VisitSelect(HSelect* select) { } void LocationsBuilderX86::VisitNativeDebugInfo(HNativeDebugInfo* info) { - new (GetGraph()->GetArena()) LocationSummary(info); + new (GetGraph()->GetAllocator()) LocationSummary(info); } void InstructionCodeGeneratorX86::VisitNativeDebugInfo(HNativeDebugInfo*) { @@ -1857,7 +1857,7 @@ void CodeGeneratorX86::GenerateNop() { void LocationsBuilderX86::HandleCondition(HCondition* cond) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(cond, LocationSummary::kNoCall); // Handle the long/FP comparisons made in instruction simplification. 
switch (cond->InputAt(0)->GetType()) { case DataType::Type::kInt64: { @@ -2024,7 +2024,7 @@ void InstructionCodeGeneratorX86::VisitAboveOrEqual(HAboveOrEqual* comp) { void LocationsBuilderX86::VisitIntConstant(HIntConstant* constant) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } @@ -2034,7 +2034,7 @@ void InstructionCodeGeneratorX86::VisitIntConstant(HIntConstant* constant ATTRIB void LocationsBuilderX86::VisitNullConstant(HNullConstant* constant) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } @@ -2044,7 +2044,7 @@ void InstructionCodeGeneratorX86::VisitNullConstant(HNullConstant* constant ATTR void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } @@ -2054,7 +2054,7 @@ void InstructionCodeGeneratorX86::VisitLongConstant(HLongConstant* constant ATTR void LocationsBuilderX86::VisitFloatConstant(HFloatConstant* constant) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } @@ -2064,7 +2064,7 @@ void InstructionCodeGeneratorX86::VisitFloatConstant(HFloatConstant* constant AT void LocationsBuilderX86::VisitDoubleConstant(HDoubleConstant* constant) { LocationSummary* 
locations = - new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } @@ -2099,7 +2099,7 @@ void InstructionCodeGeneratorX86::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNU void LocationsBuilderX86::VisitReturn(HReturn* ret) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(ret, LocationSummary::kNoCall); switch (ret->InputAt(0)->GetType()) { case DataType::Type::kReference: case DataType::Type::kBool: @@ -2300,7 +2300,7 @@ void InstructionCodeGeneratorX86::VisitInvokePolymorphic(HInvokePolymorphic* inv void LocationsBuilderX86::VisitNeg(HNeg* neg) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall); switch (neg->GetResultType()) { case DataType::Type::kInt32: case DataType::Type::kInt64: @@ -2381,7 +2381,7 @@ void InstructionCodeGeneratorX86::VisitNeg(HNeg* neg) { void LocationsBuilderX86::VisitX86FPNeg(HX86FPNeg* neg) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall); DCHECK(DataType::IsFloatingPointType(neg->GetType())); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresRegister()); @@ -2423,7 +2423,7 @@ void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) { ? 
LocationSummary::kCallOnMainOnly : LocationSummary::kNoCall; LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind); + new (GetGraph()->GetAllocator()) LocationSummary(conversion, call_kind); switch (result_type) { case DataType::Type::kUint8: @@ -2921,7 +2921,7 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio void LocationsBuilderX86::VisitAdd(HAdd* add) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(add, LocationSummary::kNoCall); switch (add->GetResultType()) { case DataType::Type::kInt32: { locations->SetInAt(0, Location::RequiresRegister()); @@ -3048,7 +3048,7 @@ void InstructionCodeGeneratorX86::VisitAdd(HAdd* add) { void LocationsBuilderX86::VisitSub(HSub* sub) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(sub, LocationSummary::kNoCall); switch (sub->GetResultType()) { case DataType::Type::kInt32: case DataType::Type::kInt64: { @@ -3154,7 +3154,7 @@ void InstructionCodeGeneratorX86::VisitSub(HSub* sub) { void LocationsBuilderX86::VisitMul(HMul* mul) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall); switch (mul->GetResultType()) { case DataType::Type::kInt32: locations->SetInAt(0, Location::RequiresRegister()); @@ -3581,7 +3581,7 @@ void InstructionCodeGeneratorX86::GenerateDivRemIntegral(HBinaryOperation* instr GenerateDivRemWithAnyConstant(instruction); } } else { - SlowPathCode* slow_path = new (GetGraph()->GetArena()) DivRemMinusOneSlowPathX86( + SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) DivRemMinusOneSlowPathX86( instruction, out.AsRegister<Register>(), is_div); codegen_->AddSlowPath(slow_path); 
@@ -3630,7 +3630,7 @@ void LocationsBuilderX86::VisitDiv(HDiv* div) { LocationSummary::CallKind call_kind = (div->GetResultType() == DataType::Type::kInt64) ? LocationSummary::kCallOnMainOnly : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(div, call_kind); switch (div->GetResultType()) { case DataType::Type::kInt32: { @@ -3735,7 +3735,7 @@ void LocationsBuilderX86::VisitRem(HRem* rem) { LocationSummary::CallKind call_kind = (rem->GetResultType() == DataType::Type::kInt64) ? LocationSummary::kCallOnMainOnly : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(rem, call_kind); switch (type) { case DataType::Type::kInt32: { @@ -3817,7 +3817,7 @@ void LocationsBuilderX86::VisitDivZeroCheck(HDivZeroCheck* instruction) { } void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction) { - SlowPathCode* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathX86(instruction); + SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathX86(instruction); codegen_->AddSlowPath(slow_path); LocationSummary* locations = instruction->GetLocations(); @@ -3867,7 +3867,7 @@ void LocationsBuilderX86::HandleShift(HBinaryOperation* op) { DCHECK(op->IsShl() || op->IsShr() || op->IsUShr()); LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(op, LocationSummary::kNoCall); switch (op->GetResultType()) { case DataType::Type::kInt32: @@ -4062,7 +4062,7 @@ void InstructionCodeGeneratorX86::GenerateUShrLong(const Location& loc, Register void LocationsBuilderX86::VisitRor(HRor* ror) { LocationSummary* locations = - new (GetGraph()->GetArena()) 
LocationSummary(ror, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(ror, LocationSummary::kNoCall); switch (ror->GetResultType()) { case DataType::Type::kInt64: @@ -4170,8 +4170,8 @@ void InstructionCodeGeneratorX86::VisitUShr(HUShr* ushr) { } void LocationsBuilderX86::VisitNewInstance(HNewInstance* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); locations->SetOut(Location::RegisterLocation(EAX)); if (instruction->IsStringAlloc()) { locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument)); @@ -4199,8 +4199,8 @@ void InstructionCodeGeneratorX86::VisitNewInstance(HNewInstance* instruction) { } void LocationsBuilderX86::VisitNewArray(HNewArray* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); locations->SetOut(Location::RegisterLocation(EAX)); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); @@ -4219,7 +4219,7 @@ void InstructionCodeGeneratorX86::VisitNewArray(HNewArray* instruction) { void LocationsBuilderX86::VisitParameterValue(HParameterValue* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); Location location = parameter_visitor_.GetNextLocation(instruction->GetType()); if (location.IsStackSlot()) { location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize()); @@ -4235,7 +4235,7 @@ void 
InstructionCodeGeneratorX86::VisitParameterValue( void LocationsBuilderX86::VisitCurrentMethod(HCurrentMethod* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument)); } @@ -4244,7 +4244,7 @@ void InstructionCodeGeneratorX86::VisitCurrentMethod(HCurrentMethod* instruction void LocationsBuilderX86::VisitClassTableGet(HClassTableGet* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister()); } @@ -4270,7 +4270,7 @@ void InstructionCodeGeneratorX86::VisitClassTableGet(HClassTableGet* instruction void LocationsBuilderX86::VisitNot(HNot* not_) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(not_, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::SameAsFirstInput()); } @@ -4297,7 +4297,7 @@ void InstructionCodeGeneratorX86::VisitNot(HNot* not_) { void LocationsBuilderX86::VisitBooleanNot(HBooleanNot* bool_not) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(bool_not, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::SameAsFirstInput()); } @@ -4312,7 +4312,7 @@ void InstructionCodeGeneratorX86::VisitBooleanNot(HBooleanNot* bool_not) { void LocationsBuilderX86::VisitCompare(HCompare* compare) { LocationSummary* locations = - new 
(GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(compare, LocationSummary::kNoCall); switch (compare->InputAt(0)->GetType()) { case DataType::Type::kBool: case DataType::Type::kUint8: @@ -4431,7 +4431,7 @@ void InstructionCodeGeneratorX86::VisitCompare(HCompare* compare) { void LocationsBuilderX86::VisitPhi(HPhi* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) { locations->SetInAt(i, Location::Any()); } @@ -4714,10 +4714,10 @@ void LocationsBuilderX86::HandleFieldGet(HInstruction* instruction, const FieldI bool object_field_get_with_read_barrier = kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, - kEmitCompilerReadBarrier ? - LocationSummary::kCallOnSlowPath : - LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, + kEmitCompilerReadBarrier + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall); if (object_field_get_with_read_barrier && kUseBakerReadBarrier) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} @@ -4862,7 +4862,7 @@ void LocationsBuilderX86::HandleFieldSet(HInstruction* instruction, const FieldI DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet()); LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); bool is_volatile = field_info.IsVolatile(); DataType::Type field_type = field_info.GetFieldType(); @@ -5149,7 +5149,7 @@ void CodeGeneratorX86::GenerateImplicitNullCheck(HNullCheck* instruction) { } void CodeGeneratorX86::GenerateExplicitNullCheck(HNullCheck* instruction) { - SlowPathCode* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathX86(instruction); + SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) NullCheckSlowPathX86(instruction); AddSlowPath(slow_path); LocationSummary* locations = instruction->GetLocations(); @@ -5176,10 +5176,10 @@ void LocationsBuilderX86::VisitArrayGet(HArrayGet* instruction) { bool object_array_get_with_read_barrier = kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, - object_array_get_with_read_barrier ? - LocationSummary::kCallOnSlowPath : - LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, + object_array_get_with_read_barrier + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall); if (object_array_get_with_read_barrier && kUseBakerReadBarrier) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} @@ -5332,7 +5332,7 @@ void LocationsBuilderX86::VisitArraySet(HArraySet* instruction) { CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue()); bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck(); - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary( + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( instruction, may_need_runtime_call_for_type_check ? LocationSummary::kCallOnSlowPath : @@ -5427,7 +5427,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) { Location temp_loc = locations->GetTemp(0); Register temp = temp_loc.AsRegister<Register>(); if (may_need_runtime_call_for_type_check) { - slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathX86(instruction); + slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathX86(instruction); codegen_->AddSlowPath(slow_path); if (instruction->GetValueCanBeNull()) { __ testl(register_value, register_value); @@ -5570,7 +5570,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) { } void LocationsBuilderX86::VisitArrayLength(HArrayLength* instruction) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); locations->SetInAt(0, Location::RequiresRegister()); if (!instruction->IsEmittedAtUseSite()) { locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); @@ -5618,7 +5618,7 @@ void InstructionCodeGeneratorX86::VisitBoundsCheck(HBoundsCheck* instruction) { Location index_loc = locations->InAt(0); Location length_loc = locations->InAt(1); SlowPathCode* slow_path = - new (GetGraph()->GetArena()) BoundsCheckSlowPathX86(instruction); + new (GetGraph()->GetAllocator()) BoundsCheckSlowPathX86(instruction); if (length_loc.IsConstant()) { int32_t length = CodeGenerator::GetInt32ValueOf(length_loc.GetConstant()); @@ -5684,8 +5684,8 @@ 
void InstructionCodeGeneratorX86::VisitParallelMove(HParallelMove* instruction) } void LocationsBuilderX86::VisitSuspendCheck(HSuspendCheck* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnSlowPath); // In suspend check slow path, usually there are no caller-save registers at all. // If SIMD instructions are present, however, we force spilling all live SIMD // registers in full width (since the runtime only saves/restores lower part). @@ -5712,7 +5712,7 @@ void InstructionCodeGeneratorX86::GenerateSuspendCheck(HSuspendCheck* instructio SuspendCheckSlowPathX86* slow_path = down_cast<SuspendCheckSlowPathX86*>(instruction->GetSlowPath()); if (slow_path == nullptr) { - slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathX86(instruction, successor); + slow_path = new (GetGraph()->GetAllocator()) SuspendCheckSlowPathX86(instruction, successor); instruction->SetSlowPath(slow_path); codegen_->AddSlowPath(slow_path); if (successor != nullptr) { @@ -6044,7 +6044,7 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) { LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier) ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind); if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} @@ -6165,7 +6165,7 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFE if (generate_null_check || cls->MustGenerateClinitCheck()) { DCHECK(cls->CanCallRuntime()); - SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86( + SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86( cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck()); codegen_->AddSlowPath(slow_path); @@ -6184,7 +6184,7 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFE void LocationsBuilderX86::VisitClinitCheck(HClinitCheck* check) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath); + new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath); locations->SetInAt(0, Location::RequiresRegister()); if (check->HasUses()) { locations->SetOut(Location::SameAsFirstInput()); @@ -6193,7 +6193,7 @@ void LocationsBuilderX86::VisitClinitCheck(HClinitCheck* check) { void InstructionCodeGeneratorX86::VisitClinitCheck(HClinitCheck* check) { // We assume the class to not be null. 
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86( + SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86( check->GetLoadClass(), check, check->GetDexPc(), true); codegen_->AddSlowPath(slow_path); GenerateClassInitializationCheck(slow_path, @@ -6229,7 +6229,7 @@ HLoadString::LoadKind CodeGeneratorX86::GetSupportedLoadStringKind( void LocationsBuilderX86::VisitLoadString(HLoadString* load) { LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load); - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind); HLoadString::LoadKind load_kind = load->GetLoadKind(); if (load_kind == HLoadString::LoadKind::kBootImageLinkTimePcRelative || load_kind == HLoadString::LoadKind::kBootImageInternTable || @@ -6300,7 +6300,7 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) NO_THREAD_S Label* fixup_label = codegen_->NewStringBssEntryPatch(load); // /* GcRoot<mirror::String> */ out = *address /* PC-relative */ GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption); - SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86(load); + SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadStringSlowPathX86(load); codegen_->AddSlowPath(slow_path); __ testl(out, out); __ j(kEqual, slow_path->GetEntryLabel()); @@ -6333,7 +6333,7 @@ static Address GetExceptionTlsAddress() { void LocationsBuilderX86::VisitLoadException(HLoadException* load) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall); locations->SetOut(Location::RequiresRegister()); } @@ -6342,7 +6342,7 @@ void InstructionCodeGeneratorX86::VisitLoadException(HLoadException* load) { } void 
LocationsBuilderX86::VisitClearException(HClearException* clear) { - new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall); } void InstructionCodeGeneratorX86::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) { @@ -6350,8 +6350,8 @@ void InstructionCodeGeneratorX86::VisitClearException(HClearException* clear ATT } void LocationsBuilderX86::VisitThrow(HThrow* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); } @@ -6403,7 +6403,8 @@ void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) { break; } - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind); if (baker_read_barrier_slow_path) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} @@ -6580,8 +6581,8 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) { __ cmpl(out, Address(ESP, cls.GetStackIndex())); } DCHECK(locations->OnlyCallsOnSlowPath()); - slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction, - /* is_fatal */ false); + slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86(instruction, + /* is_fatal */ false); codegen_->AddSlowPath(slow_path); __ j(kNotEqual, slow_path->GetEntryLabel()); __ movl(out, Immediate(1)); @@ -6612,8 +6613,8 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) { // call to the runtime not using a type checking slow path). // This should also be beneficial for the other cases above. DCHECK(locations->OnlyCallsOnSlowPath()); - slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction, - /* is_fatal */ false); + slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86(instruction, + /* is_fatal */ false); codegen_->AddSlowPath(slow_path); __ jmp(slow_path->GetEntryLabel()); if (zero.IsLinked()) { @@ -6661,7 +6662,8 @@ void LocationsBuilderX86::VisitCheckCast(HCheckCast* instruction) { IsTypeCheckSlowPathFatal(type_check_kind, throws_into_catch) ? 
LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind); locations->SetInAt(0, Location::RequiresRegister()); if (type_check_kind == TypeCheckKind::kInterfaceCheck) { // Require a register for the interface check since there is a loop that compares the class to @@ -6704,8 +6706,8 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) { IsTypeCheckSlowPathFatal(type_check_kind, instruction->CanThrowIntoCatchBlock()); SlowPathCode* type_check_slow_path = - new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction, - is_type_check_slow_path_fatal); + new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86(instruction, + is_type_check_slow_path_fatal); codegen_->AddSlowPath(type_check_slow_path); NearLabel done; @@ -6902,8 +6904,8 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) { } void LocationsBuilderX86::VisitMonitorOperation(HMonitorOperation* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); } @@ -6926,7 +6928,7 @@ void LocationsBuilderX86::VisitXor(HXor* instruction) { HandleBitwiseOperation(i void LocationsBuilderX86::HandleBitwiseOperation(HBinaryOperation* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); DCHECK(instruction->GetResultType() == DataType::Type::kInt32 || 
instruction->GetResultType() == DataType::Type::kInt64); locations->SetInAt(0, Location::RequiresRegister()); @@ -7148,7 +7150,7 @@ void InstructionCodeGeneratorX86::GenerateGcRootFieldLoad( "have different sizes."); // Slow path marking the GC root `root`. - SlowPathCode* slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86( + SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86( instruction, root, /* unpoison_ref_before_marking */ false); codegen_->AddSlowPath(slow_path); @@ -7278,10 +7280,10 @@ void CodeGeneratorX86::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* i SlowPathCode* slow_path; if (always_update_field) { DCHECK(temp != nullptr); - slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkAndUpdateFieldSlowPathX86( + slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86( instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp); } else { - slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86( + slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86( instruction, ref, /* unpoison_ref_before_marking */ true); } AddSlowPath(slow_path); @@ -7314,7 +7316,7 @@ void CodeGeneratorX86::GenerateReadBarrierSlow(HInstruction* instruction, // not used by the artReadBarrierSlow entry point. // // TODO: Unpoison `ref` when it is used by artReadBarrierSlow. - SlowPathCode* slow_path = new (GetGraph()->GetArena()) + SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) ReadBarrierForHeapReferenceSlowPathX86(instruction, out, ref, obj, offset, index); AddSlowPath(slow_path); @@ -7350,7 +7352,7 @@ void CodeGeneratorX86::GenerateReadBarrierForRootSlow(HInstruction* instruction, // Note that GC roots are not affected by heap poisoning, so we do // not need to do anything special for this here. 
SlowPathCode* slow_path = - new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathX86(instruction, out, root); + new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathX86(instruction, out, root); AddSlowPath(slow_path); __ jmp(slow_path->GetEntryLabel()); @@ -7370,7 +7372,7 @@ void InstructionCodeGeneratorX86::VisitBoundType(HBoundType* instruction ATTRIBU // Simple implementation of packed switch - generate cascaded compare/jumps. void LocationsBuilderX86::VisitPackedSwitch(HPackedSwitch* switch_instr) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); } @@ -7437,7 +7439,7 @@ void InstructionCodeGeneratorX86::VisitPackedSwitch(HPackedSwitch* switch_instr) void LocationsBuilderX86::VisitX86PackedSwitch(HX86PackedSwitch* switch_instr) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); // Constant area pointer. 
@@ -7492,7 +7494,7 @@ void InstructionCodeGeneratorX86::VisitX86PackedSwitch(HX86PackedSwitch* switch_ void LocationsBuilderX86::VisitX86ComputeBaseMethodAddress( HX86ComputeBaseMethodAddress* insn) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(insn, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(insn, LocationSummary::kNoCall); locations->SetOut(Location::RequiresRegister()); } @@ -7516,7 +7518,7 @@ void InstructionCodeGeneratorX86::VisitX86ComputeBaseMethodAddress( void LocationsBuilderX86::VisitX86LoadFromConstantTable( HX86LoadFromConstantTable* insn) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(insn, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(insn, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::ConstantLocation(insn->GetConstant())); @@ -7676,28 +7678,31 @@ Address CodeGeneratorX86::LiteralDoubleAddress(double v, HX86ComputeBaseMethodAddress* method_base, Register reg) { AssemblerFixup* fixup = - new (GetGraph()->GetArena()) RIPFixup(*this, method_base, __ AddDouble(v)); + new (GetGraph()->GetAllocator()) RIPFixup(*this, method_base, __ AddDouble(v)); return Address(reg, kDummy32BitOffset, fixup); } Address CodeGeneratorX86::LiteralFloatAddress(float v, HX86ComputeBaseMethodAddress* method_base, Register reg) { - AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, method_base, __ AddFloat(v)); + AssemblerFixup* fixup = + new (GetGraph()->GetAllocator()) RIPFixup(*this, method_base, __ AddFloat(v)); return Address(reg, kDummy32BitOffset, fixup); } Address CodeGeneratorX86::LiteralInt32Address(int32_t v, HX86ComputeBaseMethodAddress* method_base, Register reg) { - AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, method_base, __ AddInt32(v)); + AssemblerFixup* fixup = + new (GetGraph()->GetAllocator()) RIPFixup(*this, method_base, __ 
AddInt32(v)); return Address(reg, kDummy32BitOffset, fixup); } Address CodeGeneratorX86::LiteralInt64Address(int64_t v, HX86ComputeBaseMethodAddress* method_base, Register reg) { - AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, method_base, __ AddInt64(v)); + AssemblerFixup* fixup = + new (GetGraph()->GetAllocator()) RIPFixup(*this, method_base, __ AddInt64(v)); return Address(reg, kDummy32BitOffset, fixup); } @@ -7747,7 +7752,7 @@ Address CodeGeneratorX86::LiteralCaseTable(HX86PackedSwitch* switch_instr, Register value) { // Create a fixup to be used to create and address the jump table. JumpTableRIPFixup* table_fixup = - new (GetGraph()->GetArena()) JumpTableRIPFixup(*this, switch_instr); + new (GetGraph()->GetAllocator()) JumpTableRIPFixup(*this, switch_instr); // We have to populate the jump tables. fixups_to_jump_tables_.push_back(table_fixup); @@ -7773,13 +7778,13 @@ void CodeGeneratorX86::MoveFromReturnRegister(Location target, DataType::Type ty // TODO: Consider pairs in the parallel move resolver, then this could be nicely merged // with the else branch. if (type == DataType::Type::kInt64) { - HParallelMove parallel_move(GetGraph()->GetArena()); + HParallelMove parallel_move(GetGraph()->GetAllocator()); parallel_move.AddMove(return_loc.ToLow(), target.ToLow(), DataType::Type::kInt32, nullptr); parallel_move.AddMove(return_loc.ToHigh(), target.ToHigh(), DataType::Type::kInt32, nullptr); GetMoveResolver()->EmitNativeCode(¶llel_move); } else { // Let the parallel move resolver take care of all of this. 
- HParallelMove parallel_move(GetGraph()->GetArena()); + HParallelMove parallel_move(GetGraph()->GetAllocator()); parallel_move.AddMove(return_loc, target, type, nullptr); GetMoveResolver()->EmitNativeCode(¶llel_move); } diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index e8bfa66a58..b6aa110f2d 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -427,7 +427,7 @@ class ArraySetSlowPathX86_64 : public SlowPathCode { SaveLiveRegisters(codegen, locations); InvokeRuntimeCallingConvention calling_convention; - HParallelMove parallel_move(codegen->GetGraph()->GetArena()); + HParallelMove parallel_move(codegen->GetGraph()->GetAllocator()); parallel_move.AddMove( locations->InAt(0), Location::RegisterLocation(calling_convention.GetRegisterAt(0)), @@ -831,7 +831,7 @@ class ReadBarrierForHeapReferenceSlowPathX86_64 : public SlowPathCode { // We're moving two or three locations to locations that could // overlap, so we need a parallel move resolver. 
InvokeRuntimeCallingConvention calling_convention; - HParallelMove parallel_move(codegen->GetGraph()->GetArena()); + HParallelMove parallel_move(codegen->GetGraph()->GetAllocator()); parallel_move.AddMove(ref_, Location::RegisterLocation(calling_convention.GetRegisterAt(0)), DataType::Type::kReference, @@ -1230,19 +1230,19 @@ CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph, block_labels_(nullptr), location_builder_(graph, this), instruction_visitor_(graph, this), - move_resolver_(graph->GetArena(), this), - assembler_(graph->GetArena()), + move_resolver_(graph->GetAllocator(), this), + assembler_(graph->GetAllocator()), isa_features_(isa_features), constant_area_start_(0), - boot_image_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - boot_image_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - string_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), - fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) { + boot_image_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + boot_image_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + jit_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), 
+ jit_class_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + fixups_to_jump_tables_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) { AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister)); } @@ -1702,7 +1702,7 @@ void InstructionCodeGeneratorX86_64::GenerateTestAndBranch(HInstruction* instruc } void LocationsBuilderX86_64::VisitIf(HIf* if_instr) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr); if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) { locations->SetInAt(0, Location::Any()); } @@ -1719,7 +1719,7 @@ void InstructionCodeGeneratorX86_64::VisitIf(HIf* if_instr) { } void LocationsBuilderX86_64::VisitDeoptimize(HDeoptimize* deoptimize) { - LocationSummary* locations = new (GetGraph()->GetArena()) + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath); InvokeRuntimeCallingConvention calling_convention; RegisterSet caller_saves = RegisterSet::Empty(); @@ -1739,7 +1739,7 @@ void InstructionCodeGeneratorX86_64::VisitDeoptimize(HDeoptimize* deoptimize) { } void LocationsBuilderX86_64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) { - LocationSummary* locations = new (GetGraph()->GetArena()) + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(flag, LocationSummary::kNoCall); locations->SetOut(Location::RequiresRegister()); } @@ -1767,7 +1767,7 @@ static bool SelectCanUseCMOV(HSelect* select) { } void LocationsBuilderX86_64::VisitSelect(HSelect* select) { - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select); if (DataType::IsFloatingPointType(select->GetType())) { locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, 
Location::Any()); @@ -1847,7 +1847,7 @@ void InstructionCodeGeneratorX86_64::VisitSelect(HSelect* select) { } void LocationsBuilderX86_64::VisitNativeDebugInfo(HNativeDebugInfo* info) { - new (GetGraph()->GetArena()) LocationSummary(info); + new (GetGraph()->GetAllocator()) LocationSummary(info); } void InstructionCodeGeneratorX86_64::VisitNativeDebugInfo(HNativeDebugInfo*) { @@ -1860,7 +1860,7 @@ void CodeGeneratorX86_64::GenerateNop() { void LocationsBuilderX86_64::HandleCondition(HCondition* cond) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(cond, LocationSummary::kNoCall); // Handle the long/FP comparisons made in instruction simplification. switch (cond->InputAt(0)->GetType()) { case DataType::Type::kInt64: @@ -2034,7 +2034,7 @@ void InstructionCodeGeneratorX86_64::VisitAboveOrEqual(HAboveOrEqual* comp) { void LocationsBuilderX86_64::VisitCompare(HCompare* compare) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(compare, LocationSummary::kNoCall); switch (compare->InputAt(0)->GetType()) { case DataType::Type::kBool: case DataType::Type::kUint8: @@ -2132,7 +2132,7 @@ void InstructionCodeGeneratorX86_64::VisitCompare(HCompare* compare) { void LocationsBuilderX86_64::VisitIntConstant(HIntConstant* constant) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } @@ -2142,7 +2142,7 @@ void InstructionCodeGeneratorX86_64::VisitIntConstant(HIntConstant* constant ATT void LocationsBuilderX86_64::VisitNullConstant(HNullConstant* constant) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(constant, 
LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } @@ -2152,7 +2152,7 @@ void InstructionCodeGeneratorX86_64::VisitNullConstant(HNullConstant* constant A void LocationsBuilderX86_64::VisitLongConstant(HLongConstant* constant) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } @@ -2162,7 +2162,7 @@ void InstructionCodeGeneratorX86_64::VisitLongConstant(HLongConstant* constant A void LocationsBuilderX86_64::VisitFloatConstant(HFloatConstant* constant) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } @@ -2172,7 +2172,7 @@ void InstructionCodeGeneratorX86_64::VisitFloatConstant(HFloatConstant* constant void LocationsBuilderX86_64::VisitDoubleConstant(HDoubleConstant* constant) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } @@ -2208,7 +2208,7 @@ void InstructionCodeGeneratorX86_64::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_ void LocationsBuilderX86_64::VisitReturn(HReturn* ret) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(ret, LocationSummary::kNoCall); switch (ret->InputAt(0)->GetType()) { case DataType::Type::kReference: case DataType::Type::kBool: @@ -2474,7 +2474,7 @@ void 
InstructionCodeGeneratorX86_64::VisitInvokePolymorphic(HInvokePolymorphic* void LocationsBuilderX86_64::VisitNeg(HNeg* neg) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall); switch (neg->GetResultType()) { case DataType::Type::kInt32: case DataType::Type::kInt64: @@ -2540,7 +2540,7 @@ void InstructionCodeGeneratorX86_64::VisitNeg(HNeg* neg) { void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(conversion, LocationSummary::kNoCall); DataType::Type result_type = conversion->GetResultType(); DataType::Type input_type = conversion->GetInputType(); DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type)) @@ -3010,7 +3010,7 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver void LocationsBuilderX86_64::VisitAdd(HAdd* add) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(add, LocationSummary::kNoCall); switch (add->GetResultType()) { case DataType::Type::kInt32: { locations->SetInAt(0, Location::RequiresRegister()); @@ -3134,7 +3134,7 @@ void InstructionCodeGeneratorX86_64::VisitAdd(HAdd* add) { void LocationsBuilderX86_64::VisitSub(HSub* sub) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(sub, LocationSummary::kNoCall); switch (sub->GetResultType()) { case DataType::Type::kInt32: { locations->SetInAt(0, Location::RequiresRegister()); @@ -3225,7 +3225,7 @@ void InstructionCodeGeneratorX86_64::VisitSub(HSub* sub) { void LocationsBuilderX86_64::VisitMul(HMul* mul) { 
LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall); switch (mul->GetResultType()) { case DataType::Type::kInt32: { locations->SetInAt(0, Location::RequiresRegister()); @@ -3649,7 +3649,7 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemIntegral(HBinaryOperation* in } } else { SlowPathCode* slow_path = - new (GetGraph()->GetArena()) DivRemMinusOneSlowPathX86_64( + new (GetGraph()->GetAllocator()) DivRemMinusOneSlowPathX86_64( instruction, out.AsRegister(), type, is_div); codegen_->AddSlowPath(slow_path); @@ -3678,7 +3678,7 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemIntegral(HBinaryOperation* in void LocationsBuilderX86_64::VisitDiv(HDiv* div) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(div, LocationSummary::kNoCall); switch (div->GetResultType()) { case DataType::Type::kInt32: case DataType::Type::kInt64: { @@ -3761,7 +3761,7 @@ void InstructionCodeGeneratorX86_64::VisitDiv(HDiv* div) { void LocationsBuilderX86_64::VisitRem(HRem* rem) { DataType::Type type = rem->GetResultType(); LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(rem, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(rem, LocationSummary::kNoCall); switch (type) { case DataType::Type::kInt32: @@ -3818,7 +3818,7 @@ void LocationsBuilderX86_64::VisitDivZeroCheck(HDivZeroCheck* instruction) { void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instruction) { SlowPathCode* slow_path = - new (GetGraph()->GetArena()) DivZeroCheckSlowPathX86_64(instruction); + new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathX86_64(instruction); codegen_->AddSlowPath(slow_path); LocationSummary* locations = instruction->GetLocations(); @@ -3869,7 +3869,7 @@ void 
LocationsBuilderX86_64::HandleShift(HBinaryOperation* op) { DCHECK(op->IsShl() || op->IsShr() || op->IsUShr()); LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(op, LocationSummary::kNoCall); switch (op->GetResultType()) { case DataType::Type::kInt32: @@ -3945,7 +3945,7 @@ void InstructionCodeGeneratorX86_64::HandleShift(HBinaryOperation* op) { void LocationsBuilderX86_64::VisitRor(HRor* ror) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(ror, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(ror, LocationSummary::kNoCall); switch (ror->GetResultType()) { case DataType::Type::kInt32: @@ -4017,8 +4017,8 @@ void InstructionCodeGeneratorX86_64::VisitUShr(HUShr* ushr) { } void LocationsBuilderX86_64::VisitNewInstance(HNewInstance* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConvention calling_convention; if (instruction->IsStringAlloc()) { locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument)); @@ -4046,8 +4046,8 @@ void InstructionCodeGeneratorX86_64::VisitNewInstance(HNewInstance* instruction) } void LocationsBuilderX86_64::VisitNewArray(HNewArray* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConvention calling_convention; locations->SetOut(Location::RegisterLocation(RAX)); locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); @@ -4066,7 +4066,7 @@ void 
InstructionCodeGeneratorX86_64::VisitNewArray(HNewArray* instruction) { void LocationsBuilderX86_64::VisitParameterValue(HParameterValue* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); Location location = parameter_visitor_.GetNextLocation(instruction->GetType()); if (location.IsStackSlot()) { location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize()); @@ -4083,7 +4083,7 @@ void InstructionCodeGeneratorX86_64::VisitParameterValue( void LocationsBuilderX86_64::VisitCurrentMethod(HCurrentMethod* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument)); } @@ -4094,7 +4094,7 @@ void InstructionCodeGeneratorX86_64::VisitCurrentMethod( void LocationsBuilderX86_64::VisitClassTableGet(HClassTableGet* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister()); } @@ -4119,7 +4119,7 @@ void InstructionCodeGeneratorX86_64::VisitClassTableGet(HClassTableGet* instruct void LocationsBuilderX86_64::VisitNot(HNot* not_) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(not_, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::SameAsFirstInput()); } @@ -4145,7 +4145,7 @@ void InstructionCodeGeneratorX86_64::VisitNot(HNot* not_) { 
void LocationsBuilderX86_64::VisitBooleanNot(HBooleanNot* bool_not) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(bool_not, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::SameAsFirstInput()); } @@ -4160,7 +4160,7 @@ void InstructionCodeGeneratorX86_64::VisitBooleanNot(HBooleanNot* bool_not) { void LocationsBuilderX86_64::VisitPhi(HPhi* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) { locations->SetInAt(i, Location::Any()); } @@ -4201,10 +4201,10 @@ void LocationsBuilderX86_64::HandleFieldGet(HInstruction* instruction) { bool object_field_get_with_read_barrier = kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, - object_field_get_with_read_barrier ? - LocationSummary::kCallOnSlowPath : - LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, + object_field_get_with_read_barrier + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall); if (object_field_get_with_read_barrier && kUseBakerReadBarrier) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} @@ -4326,7 +4326,7 @@ void LocationsBuilderX86_64::HandleFieldSet(HInstruction* instruction, DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet()); LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); DataType::Type field_type = field_info.GetFieldType(); bool is_volatile = field_info.IsVolatile(); bool needs_write_barrier = @@ -4602,7 +4602,7 @@ void CodeGeneratorX86_64::GenerateImplicitNullCheck(HNullCheck* instruction) { } void CodeGeneratorX86_64::GenerateExplicitNullCheck(HNullCheck* instruction) { - SlowPathCode* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathX86_64(instruction); + SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) NullCheckSlowPathX86_64(instruction); AddSlowPath(slow_path); LocationSummary* locations = instruction->GetLocations(); @@ -4629,10 +4629,10 @@ void LocationsBuilderX86_64::VisitArrayGet(HArrayGet* instruction) { bool object_array_get_with_read_barrier = kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, - object_array_get_with_read_barrier ? - LocationSummary::kCallOnSlowPath : - LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, + object_array_get_with_read_barrier + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall); if (object_array_get_with_read_barrier && kUseBakerReadBarrier) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} @@ -4775,7 +4775,7 @@ void LocationsBuilderX86_64::VisitArraySet(HArraySet* instruction) { CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue()); bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck(); - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary( + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( instruction, may_need_runtime_call_for_type_check ? LocationSummary::kCallOnSlowPath : @@ -4864,7 +4864,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) { Location temp_loc = locations->GetTemp(0); CpuRegister temp = temp_loc.AsRegister<CpuRegister>(); if (may_need_runtime_call_for_type_check) { - slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathX86_64(instruction); + slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathX86_64(instruction); codegen_->AddSlowPath(slow_path); if (instruction->GetValueCanBeNull()) { __ testl(register_value, register_value); @@ -5002,7 +5002,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) { void LocationsBuilderX86_64::VisitArrayLength(HArrayLength* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); if (!instruction->IsEmittedAtUseSite()) { locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); @@ -5043,7 +5043,7 @@ void InstructionCodeGeneratorX86_64::VisitBoundsCheck(HBoundsCheck* instruction) LocationSummary* locations = instruction->GetLocations(); Location index_loc = locations->InAt(0); Location length_loc = locations->InAt(1); - SlowPathCode* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathX86_64(instruction); + SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) 
BoundsCheckSlowPathX86_64(instruction); if (length_loc.IsConstant()) { int32_t length = CodeGenerator::GetInt32ValueOf(length_loc.GetConstant()); @@ -5129,8 +5129,8 @@ void InstructionCodeGeneratorX86_64::VisitParallelMove(HParallelMove* instructio } void LocationsBuilderX86_64::VisitSuspendCheck(HSuspendCheck* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnSlowPath); // In suspend check slow path, usually there are no caller-save registers at all. // If SIMD instructions are present, however, we force spilling all live SIMD // registers in full width (since the runtime only saves/restores lower part). @@ -5157,7 +5157,7 @@ void InstructionCodeGeneratorX86_64::GenerateSuspendCheck(HSuspendCheck* instruc SuspendCheckSlowPathX86_64* slow_path = down_cast<SuspendCheckSlowPathX86_64*>(instruction->GetSlowPath()); if (slow_path == nullptr) { - slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathX86_64(instruction, successor); + slow_path = new (GetGraph()->GetAllocator()) SuspendCheckSlowPathX86_64(instruction, successor); instruction->SetSlowPath(slow_path); codegen_->AddSlowPath(slow_path); if (successor != nullptr) { @@ -5439,7 +5439,7 @@ void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) { LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier) ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind); if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} @@ -5555,7 +5555,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S if (generate_null_check || cls->MustGenerateClinitCheck()) { DCHECK(cls->CanCallRuntime()); - SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64( + SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86_64( cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck()); codegen_->AddSlowPath(slow_path); if (generate_null_check) { @@ -5572,7 +5572,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S void LocationsBuilderX86_64::VisitClinitCheck(HClinitCheck* check) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath); + new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath); locations->SetInAt(0, Location::RequiresRegister()); if (check->HasUses()) { locations->SetOut(Location::SameAsFirstInput()); @@ -5581,7 +5581,7 @@ void LocationsBuilderX86_64::VisitClinitCheck(HClinitCheck* check) { void InstructionCodeGeneratorX86_64::VisitClinitCheck(HClinitCheck* check) { // We assume the class to not be null. 
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64( + SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86_64( check->GetLoadClass(), check, check->GetDexPc(), true); codegen_->AddSlowPath(slow_path); GenerateClassInitializationCheck(slow_path, @@ -5608,7 +5608,7 @@ HLoadString::LoadKind CodeGeneratorX86_64::GetSupportedLoadStringKind( void LocationsBuilderX86_64::VisitLoadString(HLoadString* load) { LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load); - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind); if (load->GetLoadKind() == HLoadString::LoadKind::kRuntimeCall) { locations->SetOut(Location::RegisterLocation(RAX)); } else { @@ -5671,7 +5671,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREA Label* fixup_label = codegen_->NewStringBssEntryPatch(load); // /* GcRoot<mirror::Class> */ out = *address /* PC-relative */ GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption); - SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86_64(load); + SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadStringSlowPathX86_64(load); codegen_->AddSlowPath(slow_path); __ testl(out, out); __ j(kEqual, slow_path->GetEntryLabel()); @@ -5707,7 +5707,7 @@ static Address GetExceptionTlsAddress() { void LocationsBuilderX86_64::VisitLoadException(HLoadException* load) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall); locations->SetOut(Location::RequiresRegister()); } @@ -5716,7 +5716,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadException(HLoadException* load) { } void 
LocationsBuilderX86_64::VisitClearException(HClearException* clear) { - new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall); } void InstructionCodeGeneratorX86_64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) { @@ -5724,8 +5724,8 @@ void InstructionCodeGeneratorX86_64::VisitClearException(HClearException* clear } void LocationsBuilderX86_64::VisitThrow(HThrow* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); } @@ -5775,7 +5775,8 @@ void LocationsBuilderX86_64::VisitInstanceOf(HInstanceOf* instruction) { break; } - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind); if (baker_read_barrier_slow_path) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} @@ -5960,8 +5961,8 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) { __ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex())); } DCHECK(locations->OnlyCallsOnSlowPath()); - slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction, - /* is_fatal */ false); + slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86_64(instruction, + /* is_fatal */ false); codegen_->AddSlowPath(slow_path); __ j(kNotEqual, slow_path->GetEntryLabel()); __ movl(out, Immediate(1)); @@ -5992,8 +5993,8 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) { // call to the runtime not using a type checking slow path). // This should also be beneficial for the other cases above. DCHECK(locations->OnlyCallsOnSlowPath()); - slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction, - /* is_fatal */ false); + slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86_64(instruction, + /* is_fatal */ false); codegen_->AddSlowPath(slow_path); __ jmp(slow_path->GetEntryLabel()); if (zero.IsLinked()) { @@ -6041,7 +6042,8 @@ void LocationsBuilderX86_64::VisitCheckCast(HCheckCast* instruction) { LocationSummary::CallKind call_kind = is_fatal_slow_path ? 
LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind); locations->SetInAt(0, Location::RequiresRegister()); if (type_check_kind == TypeCheckKind::kInterfaceCheck) { // Require a register for the interface check since there is a loop that compares the class to @@ -6086,8 +6088,8 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) { bool is_type_check_slow_path_fatal = IsTypeCheckSlowPathFatal(type_check_kind, instruction->CanThrowIntoCatchBlock()); SlowPathCode* type_check_slow_path = - new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction, - is_type_check_slow_path_fatal); + new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86_64(instruction, + is_type_check_slow_path_fatal); codegen_->AddSlowPath(type_check_slow_path); @@ -6285,8 +6287,8 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) { } void LocationsBuilderX86_64::VisitMonitorOperation(HMonitorOperation* instruction) { - LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); } @@ -6308,7 +6310,7 @@ void LocationsBuilderX86_64::VisitXor(HXor* instruction) { HandleBitwiseOperatio void LocationsBuilderX86_64::HandleBitwiseOperation(HBinaryOperation* instruction) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); 
DCHECK(instruction->GetResultType() == DataType::Type::kInt32 || instruction->GetResultType() == DataType::Type::kInt64); locations->SetInAt(0, Location::RequiresRegister()); @@ -6512,7 +6514,7 @@ void InstructionCodeGeneratorX86_64::GenerateGcRootFieldLoad( "have different sizes."); // Slow path marking the GC root `root`. - SlowPathCode* slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86_64( + SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86_64( instruction, root, /* unpoison_ref_before_marking */ false); codegen_->AddSlowPath(slow_path); @@ -6644,10 +6646,10 @@ void CodeGeneratorX86_64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction if (always_update_field) { DCHECK(temp1 != nullptr); DCHECK(temp2 != nullptr); - slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkAndUpdateFieldSlowPathX86_64( + slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86_64( instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp1, *temp2); } else { - slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86_64( + slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86_64( instruction, ref, /* unpoison_ref_before_marking */ true); } AddSlowPath(slow_path); @@ -6680,7 +6682,7 @@ void CodeGeneratorX86_64::GenerateReadBarrierSlow(HInstruction* instruction, // not used by the artReadBarrierSlow entry point. // // TODO: Unpoison `ref` when it is used by artReadBarrierSlow. - SlowPathCode* slow_path = new (GetGraph()->GetArena()) + SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) ReadBarrierForHeapReferenceSlowPathX86_64(instruction, out, ref, obj, offset, index); AddSlowPath(slow_path); @@ -6716,7 +6718,7 @@ void CodeGeneratorX86_64::GenerateReadBarrierForRootSlow(HInstruction* instructi // Note that GC roots are not affected by heap poisoning, so we do // not need to do anything special for this here. 
SlowPathCode* slow_path = - new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathX86_64(instruction, out, root); + new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathX86_64(instruction, out, root); AddSlowPath(slow_path); __ jmp(slow_path->GetEntryLabel()); @@ -6736,7 +6738,7 @@ void InstructionCodeGeneratorX86_64::VisitBoundType(HBoundType* instruction ATTR // Simple implementation of packed switch - generate cascaded compare/jumps. void LocationsBuilderX86_64::VisitPackedSwitch(HPackedSwitch* switch_instr) { LocationSummary* locations = - new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall); + new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); locations->AddTemp(Location::RequiresRegister()); locations->AddTemp(Location::RequiresRegister()); @@ -7024,22 +7026,22 @@ void CodeGeneratorX86_64::Finalize(CodeAllocator* allocator) { } Address CodeGeneratorX86_64::LiteralDoubleAddress(double v) { - AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddDouble(v)); + AssemblerFixup* fixup = new (GetGraph()->GetAllocator()) RIPFixup(*this, __ AddDouble(v)); return Address::RIP(fixup); } Address CodeGeneratorX86_64::LiteralFloatAddress(float v) { - AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddFloat(v)); + AssemblerFixup* fixup = new (GetGraph()->GetAllocator()) RIPFixup(*this, __ AddFloat(v)); return Address::RIP(fixup); } Address CodeGeneratorX86_64::LiteralInt32Address(int32_t v) { - AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddInt32(v)); + AssemblerFixup* fixup = new (GetGraph()->GetAllocator()) RIPFixup(*this, __ AddInt32(v)); return Address::RIP(fixup); } Address CodeGeneratorX86_64::LiteralInt64Address(int64_t v) { - AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddInt64(v)); + AssemblerFixup* fixup = new 
(GetGraph()->GetAllocator()) RIPFixup(*this, __ AddInt64(v)); return Address::RIP(fixup); } @@ -7058,7 +7060,7 @@ void CodeGeneratorX86_64::MoveFromReturnRegister(Location trg, DataType::Type ty } // Let the parallel move resolver take care of all of this. - HParallelMove parallel_move(GetGraph()->GetArena()); + HParallelMove parallel_move(GetGraph()->GetAllocator()); parallel_move.AddMove(return_loc, trg, type, nullptr); GetMoveResolver()->EmitNativeCode(¶llel_move); } @@ -7066,7 +7068,7 @@ void CodeGeneratorX86_64::MoveFromReturnRegister(Location trg, DataType::Type ty Address CodeGeneratorX86_64::LiteralCaseTable(HPackedSwitch* switch_instr) { // Create a fixup to be used to create and address the jump table. JumpTableRIPFixup* table_fixup = - new (GetGraph()->GetArena()) JumpTableRIPFixup(*this, switch_instr); + new (GetGraph()->GetAllocator()) JumpTableRIPFixup(*this, switch_instr); // We have to populate the jump tables. fixups_to_jump_tables_.push_back(table_fixup); diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc index b558eb17a7..d8ebac95a8 100644 --- a/compiler/optimizing/code_sinking.cc +++ b/compiler/optimizing/code_sinking.cc @@ -16,6 +16,10 @@ #include "code_sinking.h" +#include "base/arena_bit_vector.h" +#include "base/bit_vector-inl.h" +#include "base/scoped_arena_allocator.h" +#include "base/scoped_arena_containers.h" #include "common_dominator.h" #include "nodes.h" @@ -115,7 +119,7 @@ static bool IsInterestingInstruction(HInstruction* instruction) { static void AddInstruction(HInstruction* instruction, const ArenaBitVector& processed_instructions, const ArenaBitVector& discard_blocks, - ArenaVector<HInstruction*>* worklist) { + ScopedArenaVector<HInstruction*>* worklist) { // Add to the work list if the instruction is not in the list of blocks // to discard, hasn't been already processed and is of interest. 
if (!discard_blocks.IsBitSet(instruction->GetBlock()->GetBlockId()) && @@ -128,7 +132,7 @@ static void AddInstruction(HInstruction* instruction, static void AddInputs(HInstruction* instruction, const ArenaBitVector& processed_instructions, const ArenaBitVector& discard_blocks, - ArenaVector<HInstruction*>* worklist) { + ScopedArenaVector<HInstruction*>* worklist) { for (HInstruction* input : instruction->GetInputs()) { AddInstruction(input, processed_instructions, discard_blocks, worklist); } @@ -137,7 +141,7 @@ static void AddInputs(HInstruction* instruction, static void AddInputs(HBasicBlock* block, const ArenaBitVector& processed_instructions, const ArenaBitVector& discard_blocks, - ArenaVector<HInstruction*>* worklist) { + ScopedArenaVector<HInstruction*>* worklist) { for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { AddInputs(it.Current(), processed_instructions, discard_blocks, worklist); } @@ -242,17 +246,19 @@ static HInstruction* FindIdealPosition(HInstruction* instruction, void CodeSinking::SinkCodeToUncommonBranch(HBasicBlock* end_block) { - // Local allocator to discard data structures created below at the end of - // this optimization. - ArenaAllocator allocator(graph_->GetArena()->GetArenaPool()); + // Local allocator to discard data structures created below at the end of this optimization. 
+ ScopedArenaAllocator allocator(graph_->GetArenaStack()); size_t number_of_instructions = graph_->GetCurrentInstructionId(); - ArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc)); + ScopedArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc)); ArenaBitVector processed_instructions(&allocator, number_of_instructions, /* expandable */ false); + processed_instructions.ClearAllBits(); ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable */ false); + post_dominated.ClearAllBits(); ArenaBitVector instructions_that_can_move( &allocator, number_of_instructions, /* expandable */ false); - ArenaVector<HInstruction*> move_in_order(allocator.Adapter(kArenaAllocMisc)); + instructions_that_can_move.ClearAllBits(); + ScopedArenaVector<HInstruction*> move_in_order(allocator.Adapter(kArenaAllocMisc)); // Step (1): Visit post order to get a subset of blocks post dominated by `end_block`. // TODO(ngeoffray): Getting the full set of post-dominated shoud be done by diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc index 896fcfa20d..e35c7c734b 100644 --- a/compiler/optimizing/codegen_test.cc +++ b/compiler/optimizing/codegen_test.cc @@ -72,34 +72,37 @@ static ::std::vector<CodegenTargetConfig> GetTargetConfigs() { return v; } -static void TestCode(const uint16_t* data, - bool has_result = false, - int32_t expected = 0) { +class CodegenTest : public OptimizingUnitTest { + protected: + void TestCode(const uint16_t* data, bool has_result = false, int32_t expected = 0); + void TestCodeLong(const uint16_t* data, bool has_result, int64_t expected); + void TestComparison(IfCondition condition, + int64_t i, + int64_t j, + DataType::Type type, + const CodegenTargetConfig target_config); +}; + +void CodegenTest::TestCode(const uint16_t* data, bool has_result, int32_t expected) { for (const CodegenTargetConfig& target_config : GetTargetConfigs()) { - ArenaPool pool; - ArenaAllocator 
arena(&pool); - HGraph* graph = CreateCFG(&arena, data); + ResetPoolAndAllocator(); + HGraph* graph = CreateCFG(data); // Remove suspend checks, they cannot be executed in this context. RemoveSuspendChecks(graph); RunCode(target_config, graph, [](HGraph*) {}, has_result, expected); } } -static void TestCodeLong(const uint16_t* data, - bool has_result, - int64_t expected) { +void CodegenTest::TestCodeLong(const uint16_t* data, bool has_result, int64_t expected) { for (const CodegenTargetConfig& target_config : GetTargetConfigs()) { - ArenaPool pool; - ArenaAllocator arena(&pool); - HGraph* graph = CreateCFG(&arena, data, DataType::Type::kInt64); + ResetPoolAndAllocator(); + HGraph* graph = CreateCFG(data, DataType::Type::kInt64); // Remove suspend checks, they cannot be executed in this context. RemoveSuspendChecks(graph); RunCode(target_config, graph, [](HGraph*) {}, has_result, expected); } } -class CodegenTest : public CommonCompilerTest {}; - TEST_F(CodegenTest, ReturnVoid) { const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(Instruction::RETURN_VOID); TestCode(data); @@ -412,28 +415,25 @@ TEST_F(CodegenTest, ReturnMulIntLit16) { TEST_F(CodegenTest, NonMaterializedCondition) { for (CodegenTargetConfig target_config : GetTargetConfigs()) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - - HGraph* graph = CreateGraph(&allocator); + HGraph* graph = CreateGraph(); - HBasicBlock* entry = new (&allocator) HBasicBlock(graph); + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(entry); graph->SetEntryBlock(entry); - entry->AddInstruction(new (&allocator) HGoto()); + entry->AddInstruction(new (GetAllocator()) HGoto()); - HBasicBlock* first_block = new (&allocator) HBasicBlock(graph); + HBasicBlock* first_block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(first_block); entry->AddSuccessor(first_block); HIntConstant* constant0 = graph->GetIntConstant(0); HIntConstant* constant1 = graph->GetIntConstant(1); - HEqual* equal = new 
(&allocator) HEqual(constant0, constant0); + HEqual* equal = new (GetAllocator()) HEqual(constant0, constant0); first_block->AddInstruction(equal); - first_block->AddInstruction(new (&allocator) HIf(equal)); + first_block->AddInstruction(new (GetAllocator()) HIf(equal)); - HBasicBlock* then_block = new (&allocator) HBasicBlock(graph); - HBasicBlock* else_block = new (&allocator) HBasicBlock(graph); - HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph); + HBasicBlock* then_block = new (GetAllocator()) HBasicBlock(graph); + HBasicBlock* else_block = new (GetAllocator()) HBasicBlock(graph); + HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph); graph->SetExitBlock(exit_block); graph->AddBlock(then_block); @@ -444,9 +444,9 @@ TEST_F(CodegenTest, NonMaterializedCondition) { then_block->AddSuccessor(exit_block); else_block->AddSuccessor(exit_block); - exit_block->AddInstruction(new (&allocator) HExit()); - then_block->AddInstruction(new (&allocator) HReturn(constant0)); - else_block->AddInstruction(new (&allocator) HReturn(constant1)); + exit_block->AddInstruction(new (GetAllocator()) HExit()); + then_block->AddInstruction(new (GetAllocator()) HReturn(constant0)); + else_block->AddInstruction(new (GetAllocator()) HReturn(constant1)); ASSERT_FALSE(equal->IsEmittedAtUseSite()); graph->BuildDominatorTree(); @@ -455,7 +455,7 @@ TEST_F(CodegenTest, NonMaterializedCondition) { auto hook_before_codegen = [](HGraph* graph_in) { HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0]; - HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena()); + HParallelMove* move = new (graph_in->GetAllocator()) HParallelMove(graph_in->GetAllocator()); block->InsertInstructionBefore(move, block->GetLastInstruction()); }; @@ -475,19 +475,17 @@ TEST_F(CodegenTest, MaterializedCondition1) { int rhs[] = {2, 1, 2, -1, 0xabc}; for (size_t i = 0; i < arraysize(lhs); i++) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* 
graph = CreateGraph(&allocator); + HGraph* graph = CreateGraph(); - HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph); + HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(entry_block); graph->SetEntryBlock(entry_block); - entry_block->AddInstruction(new (&allocator) HGoto()); - HBasicBlock* code_block = new (&allocator) HBasicBlock(graph); + entry_block->AddInstruction(new (GetAllocator()) HGoto()); + HBasicBlock* code_block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(code_block); - HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph); + HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(exit_block); - exit_block->AddInstruction(new (&allocator) HExit()); + exit_block->AddInstruction(new (GetAllocator()) HExit()); entry_block->AddSuccessor(code_block); code_block->AddSuccessor(exit_block); @@ -503,7 +501,8 @@ TEST_F(CodegenTest, MaterializedCondition1) { graph->BuildDominatorTree(); auto hook_before_codegen = [](HGraph* graph_in) { HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0]; - HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena()); + HParallelMove* move = + new (graph_in->GetAllocator()) HParallelMove(graph_in->GetAllocator()); block->InsertInstructionBefore(move, block->GetLastInstruction()); }; RunCode(target_config, graph, hook_before_codegen, true, lhs[i] < rhs[i]); @@ -523,24 +522,22 @@ TEST_F(CodegenTest, MaterializedCondition2) { for (size_t i = 0; i < arraysize(lhs); i++) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = CreateGraph(&allocator); + HGraph* graph = CreateGraph(); - HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph); + HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(entry_block); graph->SetEntryBlock(entry_block); - entry_block->AddInstruction(new (&allocator) HGoto()); + entry_block->AddInstruction(new (GetAllocator()) 
HGoto()); - HBasicBlock* if_block = new (&allocator) HBasicBlock(graph); + HBasicBlock* if_block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(if_block); - HBasicBlock* if_true_block = new (&allocator) HBasicBlock(graph); + HBasicBlock* if_true_block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(if_true_block); - HBasicBlock* if_false_block = new (&allocator) HBasicBlock(graph); + HBasicBlock* if_false_block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(if_false_block); - HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph); + HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(exit_block); - exit_block->AddInstruction(new (&allocator) HExit()); + exit_block->AddInstruction(new (GetAllocator()) HExit()); graph->SetEntryBlock(entry_block); entry_block->AddSuccessor(if_block); @@ -571,7 +568,8 @@ TEST_F(CodegenTest, MaterializedCondition2) { graph->BuildDominatorTree(); auto hook_before_codegen = [](HGraph* graph_in) { HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0]; - HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena()); + HParallelMove* move = + new (graph_in->GetAllocator()) HParallelMove(graph_in->GetAllocator()); block->InsertInstructionBefore(move, block->GetLastInstruction()); }; RunCode(target_config, graph, hook_before_codegen, true, lhs[i] < rhs[i]); @@ -599,27 +597,25 @@ TEST_F(CodegenTest, ReturnDivInt2Addr) { } // Helper method. 
-static void TestComparison(IfCondition condition, - int64_t i, - int64_t j, - DataType::Type type, - const CodegenTargetConfig target_config) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = CreateGraph(&allocator); - - HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph); +void CodegenTest::TestComparison(IfCondition condition, + int64_t i, + int64_t j, + DataType::Type type, + const CodegenTargetConfig target_config) { + HGraph* graph = CreateGraph(); + + HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(entry_block); graph->SetEntryBlock(entry_block); - entry_block->AddInstruction(new (&allocator) HGoto()); + entry_block->AddInstruction(new (GetAllocator()) HGoto()); - HBasicBlock* block = new (&allocator) HBasicBlock(graph); + HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(block); - HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph); + HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(exit_block); graph->SetExitBlock(exit_block); - exit_block->AddInstruction(new (&allocator) HExit()); + exit_block->AddInstruction(new (GetAllocator()) HExit()); entry_block->AddSuccessor(block); block->AddSuccessor(exit_block); @@ -641,48 +637,48 @@ static void TestComparison(IfCondition condition, const uint64_t y = j; switch (condition) { case kCondEQ: - comparison = new (&allocator) HEqual(op1, op2); + comparison = new (GetAllocator()) HEqual(op1, op2); expected_result = (i == j); break; case kCondNE: - comparison = new (&allocator) HNotEqual(op1, op2); + comparison = new (GetAllocator()) HNotEqual(op1, op2); expected_result = (i != j); break; case kCondLT: - comparison = new (&allocator) HLessThan(op1, op2); + comparison = new (GetAllocator()) HLessThan(op1, op2); expected_result = (i < j); break; case kCondLE: - comparison = new (&allocator) HLessThanOrEqual(op1, op2); + comparison = new (GetAllocator()) HLessThanOrEqual(op1, op2); 
expected_result = (i <= j); break; case kCondGT: - comparison = new (&allocator) HGreaterThan(op1, op2); + comparison = new (GetAllocator()) HGreaterThan(op1, op2); expected_result = (i > j); break; case kCondGE: - comparison = new (&allocator) HGreaterThanOrEqual(op1, op2); + comparison = new (GetAllocator()) HGreaterThanOrEqual(op1, op2); expected_result = (i >= j); break; case kCondB: - comparison = new (&allocator) HBelow(op1, op2); + comparison = new (GetAllocator()) HBelow(op1, op2); expected_result = (x < y); break; case kCondBE: - comparison = new (&allocator) HBelowOrEqual(op1, op2); + comparison = new (GetAllocator()) HBelowOrEqual(op1, op2); expected_result = (x <= y); break; case kCondA: - comparison = new (&allocator) HAbove(op1, op2); + comparison = new (GetAllocator()) HAbove(op1, op2); expected_result = (x > y); break; case kCondAE: - comparison = new (&allocator) HAboveOrEqual(op1, op2); + comparison = new (GetAllocator()) HAboveOrEqual(op1, op2); expected_result = (x >= y); break; } block->AddInstruction(comparison); - block->AddInstruction(new (&allocator) HReturn(comparison)); + block->AddInstruction(new (GetAllocator()) HReturn(comparison)); graph->BuildDominatorTree(); RunCode(target_config, graph, [](HGraph*) {}, true, expected_result); @@ -718,9 +714,7 @@ TEST_F(CodegenTest, ComparisonsLong) { TEST_F(CodegenTest, ARMVIXLParallelMoveResolver) { std::unique_ptr<const ArmInstructionSetFeatures> features( ArmInstructionSetFeatures::FromCppDefines()); - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = CreateGraph(&allocator); + HGraph* graph = CreateGraph(); arm::CodeGeneratorARMVIXL codegen(graph, *features.get(), CompilerOptions()); codegen.Initialize(); @@ -729,7 +723,7 @@ TEST_F(CodegenTest, ARMVIXLParallelMoveResolver) { // int mem2) which was faulty (before the fix). 
So previously GPR and FP scratch registers were // used as temps; however GPR scratch register is required for big stack offsets which don't fit // LDR encoding. So the following code is a regression test for that situation. - HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena()); + HParallelMove* move = new (graph->GetAllocator()) HParallelMove(graph->GetAllocator()); move->AddMove(Location::StackSlot(0), Location::StackSlot(8192), DataType::Type::kInt32, nullptr); move->AddMove(Location::StackSlot(8192), Location::StackSlot(0), DataType::Type::kInt32, nullptr); codegen.GetMoveResolver()->EmitNativeCode(move); @@ -744,9 +738,7 @@ TEST_F(CodegenTest, ARMVIXLParallelMoveResolver) { TEST_F(CodegenTest, ARM64ParallelMoveResolverB34760542) { std::unique_ptr<const Arm64InstructionSetFeatures> features( Arm64InstructionSetFeatures::FromCppDefines()); - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = CreateGraph(&allocator); + HGraph* graph = CreateGraph(); arm64::CodeGeneratorARM64 codegen(graph, *features.get(), CompilerOptions()); codegen.Initialize(); @@ -777,7 +769,7 @@ TEST_F(CodegenTest, ARM64ParallelMoveResolverB34760542) { // The solution used so far is to use a floating-point temp register // (D31) in step #2, so that IP1 is available for step #3. 
- HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena()); + HParallelMove* move = new (graph->GetAllocator()) HParallelMove(graph->GetAllocator()); move->AddMove(Location::DoubleStackSlot(0), Location::DoubleStackSlot(257), DataType::Type::kFloat64, @@ -796,16 +788,14 @@ TEST_F(CodegenTest, ARM64ParallelMoveResolverB34760542) { TEST_F(CodegenTest, ARM64ParallelMoveResolverSIMD) { std::unique_ptr<const Arm64InstructionSetFeatures> features( Arm64InstructionSetFeatures::FromCppDefines()); - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = CreateGraph(&allocator); + HGraph* graph = CreateGraph(); arm64::CodeGeneratorARM64 codegen(graph, *features.get(), CompilerOptions()); codegen.Initialize(); graph->SetHasSIMD(true); for (int i = 0; i < 2; i++) { - HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena()); + HParallelMove* move = new (graph->GetAllocator()) HParallelMove(graph->GetAllocator()); move->AddMove(Location::SIMDStackSlot(0), Location::SIMDStackSlot(257), DataType::Type::kFloat64, @@ -841,33 +831,31 @@ TEST_F(CodegenTest, MipsClobberRA) { return; } - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = CreateGraph(&allocator); + HGraph* graph = CreateGraph(); - HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph); + HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(entry_block); graph->SetEntryBlock(entry_block); - entry_block->AddInstruction(new (&allocator) HGoto()); + entry_block->AddInstruction(new (GetAllocator()) HGoto()); - HBasicBlock* block = new (&allocator) HBasicBlock(graph); + HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(block); - HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph); + HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(exit_block); graph->SetExitBlock(exit_block); - exit_block->AddInstruction(new (&allocator) HExit()); + 
exit_block->AddInstruction(new (GetAllocator()) HExit()); entry_block->AddSuccessor(block); block->AddSuccessor(exit_block); // To simplify matters, don't create PC-relative HLoadClass or HLoadString. // Instead, generate HMipsComputeBaseMethodAddress directly. - HMipsComputeBaseMethodAddress* base = new (&allocator) HMipsComputeBaseMethodAddress(); + HMipsComputeBaseMethodAddress* base = new (GetAllocator()) HMipsComputeBaseMethodAddress(); block->AddInstruction(base); // HMipsComputeBaseMethodAddress is defined as int, so just make the // compiled method return it. - block->AddInstruction(new (&allocator) HReturn(base)); + block->AddInstruction(new (GetAllocator()) HReturn(base)); graph->BuildDominatorTree(); diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h index aa4f5da3f0..bcbcc12349 100644 --- a/compiler/optimizing/codegen_test_utils.h +++ b/compiler/optimizing/codegen_test_utils.h @@ -295,10 +295,15 @@ static void RunCodeNoCheck(CodeGenerator* codegen, const std::function<void(HGraph*)>& hook_before_codegen, bool has_result, Expected expected) { - SsaLivenessAnalysis liveness(graph, codegen); - PrepareForRegisterAllocation(graph).Run(); - liveness.Analyze(); - RegisterAllocator::Create(graph->GetArena(), codegen, liveness)->AllocateRegisters(); + { + ScopedArenaAllocator local_allocator(graph->GetArenaStack()); + SsaLivenessAnalysis liveness(graph, codegen, &local_allocator); + PrepareForRegisterAllocation(graph).Run(); + liveness.Analyze(); + std::unique_ptr<RegisterAllocator> register_allocator = + RegisterAllocator::Create(&local_allocator, codegen, liveness); + register_allocator->AllocateRegisters(); + } hook_before_codegen(graph); InternalCodeAllocator allocator; codegen->Compile(&allocator); @@ -331,7 +336,7 @@ static void RunCode(CodegenTargetConfig target_config, CodeGenerator* create_codegen_arm_vixl32(HGraph* graph, const CompilerOptions& compiler_options) { std::unique_ptr<const 
ArmInstructionSetFeatures> features_arm( ArmInstructionSetFeatures::FromCppDefines()); - return new (graph->GetArena()) + return new (graph->GetAllocator()) TestCodeGeneratorARMVIXL(graph, *features_arm.get(), compiler_options); } #endif @@ -340,7 +345,7 @@ CodeGenerator* create_codegen_arm_vixl32(HGraph* graph, const CompilerOptions& c CodeGenerator* create_codegen_arm64(HGraph* graph, const CompilerOptions& compiler_options) { std::unique_ptr<const Arm64InstructionSetFeatures> features_arm64( Arm64InstructionSetFeatures::FromCppDefines()); - return new (graph->GetArena()) + return new (graph->GetAllocator()) TestCodeGeneratorARM64(graph, *features_arm64.get(), compiler_options); } #endif @@ -349,7 +354,8 @@ CodeGenerator* create_codegen_arm64(HGraph* graph, const CompilerOptions& compil CodeGenerator* create_codegen_x86(HGraph* graph, const CompilerOptions& compiler_options) { std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); - return new (graph->GetArena()) TestCodeGeneratorX86(graph, *features_x86.get(), compiler_options); + return new (graph->GetAllocator()) TestCodeGeneratorX86( + graph, *features_x86.get(), compiler_options); } #endif @@ -357,7 +363,7 @@ CodeGenerator* create_codegen_x86(HGraph* graph, const CompilerOptions& compiler CodeGenerator* create_codegen_x86_64(HGraph* graph, const CompilerOptions& compiler_options) { std::unique_ptr<const X86_64InstructionSetFeatures> features_x86_64( X86_64InstructionSetFeatures::FromCppDefines()); - return new (graph->GetArena()) + return new (graph->GetAllocator()) x86_64::CodeGeneratorX86_64(graph, *features_x86_64.get(), compiler_options); } #endif @@ -366,7 +372,7 @@ CodeGenerator* create_codegen_x86_64(HGraph* graph, const CompilerOptions& compi CodeGenerator* create_codegen_mips(HGraph* graph, const CompilerOptions& compiler_options) { std::unique_ptr<const MipsInstructionSetFeatures> features_mips( MipsInstructionSetFeatures::FromCppDefines()); - 
return new (graph->GetArena()) + return new (graph->GetAllocator()) mips::CodeGeneratorMIPS(graph, *features_mips.get(), compiler_options); } #endif @@ -375,7 +381,7 @@ CodeGenerator* create_codegen_mips(HGraph* graph, const CompilerOptions& compile CodeGenerator* create_codegen_mips64(HGraph* graph, const CompilerOptions& compiler_options) { std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64( Mips64InstructionSetFeatures::FromCppDefines()); - return new (graph->GetArena()) + return new (graph->GetAllocator()) mips64::CodeGeneratorMIPS64(graph, *features_mips64.get(), compiler_options); } #endif diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc index c85a2e3e70..e1980e080e 100644 --- a/compiler/optimizing/constant_folding_test.cc +++ b/compiler/optimizing/constant_folding_test.cc @@ -32,11 +32,9 @@ namespace art { /** * Fixture class for the constant folding and dce tests. */ -class ConstantFoldingTest : public CommonCompilerTest { +class ConstantFoldingTest : public OptimizingUnitTest { public: - ConstantFoldingTest() : pool_(), allocator_(&pool_) { - graph_ = CreateGraph(&allocator_); - } + ConstantFoldingTest() : graph_(nullptr) { } void TestCode(const uint16_t* data, const std::string& expected_before, @@ -44,7 +42,7 @@ class ConstantFoldingTest : public CommonCompilerTest { const std::string& expected_after_dce, const std::function<void(HGraph*)>& check_after_cf, DataType::Type return_type = DataType::Type::kInt32) { - graph_ = CreateCFG(&allocator_, data, return_type); + graph_ = CreateCFG(data, return_type); TestCodeOnReadyGraph(expected_before, expected_after_cf, expected_after_dce, @@ -88,8 +86,6 @@ class ConstantFoldingTest : public CommonCompilerTest { EXPECT_EQ(expected_after_dce, actual_after_dce); } - ArenaPool pool_; - ArenaAllocator allocator_; HGraph* graph_; }; @@ -742,46 +738,46 @@ TEST_F(ConstantFoldingTest, ConstantCondition) { * in the bytecode, we need to set up the graph 
explicitly. */ TEST_F(ConstantFoldingTest, UnsignedComparisonsWithZero) { - graph_ = CreateGraph(&allocator_); - HBasicBlock* entry_block = new (&allocator_) HBasicBlock(graph_); + graph_ = CreateGraph(); + HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(entry_block); graph_->SetEntryBlock(entry_block); - HBasicBlock* block = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(block); - HBasicBlock* exit_block = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(exit_block); graph_->SetExitBlock(exit_block); entry_block->AddSuccessor(block); block->AddSuccessor(exit_block); // Make various unsigned comparisons with zero against a parameter. - HInstruction* parameter = new (&allocator_) HParameterValue( + HInstruction* parameter = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32, true); entry_block->AddInstruction(parameter); - entry_block->AddInstruction(new (&allocator_) HGoto()); + entry_block->AddInstruction(new (GetAllocator()) HGoto()); HInstruction* zero = graph_->GetIntConstant(0); HInstruction* last; - block->AddInstruction(last = new (&allocator_) HAbove(zero, parameter)); - block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0)); - block->AddInstruction(last = new (&allocator_) HAbove(parameter, zero)); - block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0)); - block->AddInstruction(last = new (&allocator_) HAboveOrEqual(zero, parameter)); - block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0)); - block->AddInstruction(last = new (&allocator_) HAboveOrEqual(parameter, zero)); - block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0)); - block->AddInstruction(last = new (&allocator_) HBelow(zero, parameter)); - 
block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0)); - block->AddInstruction(last = new (&allocator_) HBelow(parameter, zero)); - block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0)); - block->AddInstruction(last = new (&allocator_) HBelowOrEqual(zero, parameter)); - block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0)); - block->AddInstruction(last = new (&allocator_) HBelowOrEqual(parameter, zero)); - block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0)); - block->AddInstruction(new (&allocator_) HReturn(zero)); - - exit_block->AddInstruction(new (&allocator_) HExit()); + block->AddInstruction(last = new (GetAllocator()) HAbove(zero, parameter)); + block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0)); + block->AddInstruction(last = new (GetAllocator()) HAbove(parameter, zero)); + block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0)); + block->AddInstruction(last = new (GetAllocator()) HAboveOrEqual(zero, parameter)); + block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0)); + block->AddInstruction(last = new (GetAllocator()) HAboveOrEqual(parameter, zero)); + block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0)); + block->AddInstruction(last = new (GetAllocator()) HBelow(zero, parameter)); + block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0)); + block->AddInstruction(last = new (GetAllocator()) HBelow(parameter, zero)); + block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0)); + block->AddInstruction(last = new (GetAllocator()) HBelowOrEqual(zero, parameter)); + block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0)); + block->AddInstruction(last = new (GetAllocator()) HBelowOrEqual(parameter, zero)); + block->AddInstruction(new (GetAllocator()) HSelect(last, 
parameter, parameter, 0)); + block->AddInstruction(new (GetAllocator()) HReturn(zero)); + + exit_block->AddInstruction(new (GetAllocator()) HExit()); graph_->BuildDominatorTree(); diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.cc b/compiler/optimizing/constructor_fence_redundancy_elimination.cc index ff7ce60905..4a66cd2265 100644 --- a/compiler/optimizing/constructor_fence_redundancy_elimination.cc +++ b/compiler/optimizing/constructor_fence_redundancy_elimination.cc @@ -17,6 +17,8 @@ #include "constructor_fence_redundancy_elimination.h" #include "base/arena_allocator.h" +#include "base/scoped_arena_allocator.h" +#include "base/scoped_arena_containers.h" namespace art { @@ -27,7 +29,7 @@ class CFREVisitor : public HGraphVisitor { public: CFREVisitor(HGraph* graph, OptimizingCompilerStats* stats) : HGraphVisitor(graph), - scoped_allocator_(graph->GetArena()->GetArenaPool()), + scoped_allocator_(graph->GetArenaStack()), candidate_fences_(scoped_allocator_.Adapter(kArenaAllocCFRE)), candidate_fence_targets_(scoped_allocator_.Adapter(kArenaAllocCFRE)), stats_(stats) {} @@ -227,9 +229,8 @@ class CFREVisitor : public HGraphVisitor { MaybeRecordStat(stats_, MethodCompilationStat::kConstructorFenceRemovedCFRE); } - // Phase-local heap memory allocator for CFRE optimizer. Storage obtained - // through this allocator is immediately released when the CFRE optimizer is done. - ArenaAllocator scoped_allocator_; + // Phase-local heap memory allocator for CFRE optimizer. + ScopedArenaAllocator scoped_allocator_; // Set of constructor fences that we've seen in the current block. // Each constructor fences acts as a guard for one or more `targets`. @@ -237,11 +238,11 @@ class CFREVisitor : public HGraphVisitor { // // Fences are in succession order (e.g. fence[i] succeeds fence[i-1] // within the same basic block). 
- ArenaVector<HConstructorFence*> candidate_fences_; + ScopedArenaVector<HConstructorFence*> candidate_fences_; // Stores a set of the fence targets, to allow faster lookup of whether // a detected publish is a target of one of the candidate fences. - ArenaHashSet<HInstruction*> candidate_fence_targets_; + ScopedArenaHashSet<HInstruction*> candidate_fence_targets_; // Used to record stats about the optimization. OptimizingCompilerStats* const stats_; diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc index 9b094e989e..5117e07a12 100644 --- a/compiler/optimizing/dead_code_elimination.cc +++ b/compiler/optimizing/dead_code_elimination.cc @@ -24,7 +24,7 @@ namespace art { static void MarkReachableBlocks(HGraph* graph, ArenaBitVector* visited) { - ArenaVector<HBasicBlock*> worklist(graph->GetArena()->Adapter(kArenaAllocDCE)); + ArenaVector<HBasicBlock*> worklist(graph->GetAllocator()->Adapter(kArenaAllocDCE)); constexpr size_t kDefaultWorlistSize = 8; worklist.reserve(kDefaultWorlistSize); visited->SetBit(graph->GetEntryBlock()->GetBlockId()); @@ -306,7 +306,7 @@ void HDeadCodeElimination::ConnectSuccessiveBlocks() { bool HDeadCodeElimination::RemoveDeadBlocks() { // Classify blocks as reachable/unreachable. 
- ArenaAllocator* allocator = graph_->GetArena(); + ArenaAllocator* allocator = graph_->GetAllocator(); ArenaBitVector live_blocks(allocator, graph_->GetBlocks().size(), false, kArenaAllocDCE); MarkReachableBlocks(graph_, &live_blocks); diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc index 96fa5406b2..929572ee3b 100644 --- a/compiler/optimizing/dead_code_elimination_test.cc +++ b/compiler/optimizing/dead_code_elimination_test.cc @@ -27,14 +27,17 @@ namespace art { -class DeadCodeEliminationTest : public CommonCompilerTest {}; - -static void TestCode(const uint16_t* data, - const std::string& expected_before, - const std::string& expected_after) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = CreateCFG(&allocator, data); +class DeadCodeEliminationTest : public OptimizingUnitTest { + protected: + void TestCode(const uint16_t* data, + const std::string& expected_before, + const std::string& expected_after); +}; + +void DeadCodeEliminationTest::TestCode(const uint16_t* data, + const std::string& expected_before, + const std::string& expected_after) { + HGraph* graph = CreateCFG(data); ASSERT_NE(graph, nullptr); StringPrettyPrinter printer_before(graph); diff --git a/compiler/optimizing/dominator_test.cc b/compiler/optimizing/dominator_test.cc index 50c677adf5..6bf3a5943f 100644 --- a/compiler/optimizing/dominator_test.cc +++ b/compiler/optimizing/dominator_test.cc @@ -24,12 +24,13 @@ namespace art { -class OptimizerTest : public CommonCompilerTest {}; +class OptimizerTest : public OptimizingUnitTest { + protected: + void TestCode(const uint16_t* data, const uint32_t* blocks, size_t blocks_length); +}; -static void TestCode(const uint16_t* data, const uint32_t* blocks, size_t blocks_length) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = CreateCFG(&allocator, data); +void OptimizerTest::TestCode(const uint16_t* data, const uint32_t* blocks, size_t 
blocks_length) { + HGraph* graph = CreateCFG(data); ASSERT_EQ(graph->GetBlocks().size(), blocks_length); for (size_t i = 0, e = blocks_length; i < e; ++i) { if (blocks[i] == kInvalidBlockId) { diff --git a/compiler/optimizing/emit_swap_mips_test.cc b/compiler/optimizing/emit_swap_mips_test.cc index 0e9c81dae3..36e932c67a 100644 --- a/compiler/optimizing/emit_swap_mips_test.cc +++ b/compiler/optimizing/emit_swap_mips_test.cc @@ -25,16 +25,15 @@ namespace art { -class EmitSwapMipsTest : public ::testing::Test { +class EmitSwapMipsTest : public OptimizingUnitTest { public: void SetUp() OVERRIDE { - allocator_.reset(new ArenaAllocator(&pool_)); - graph_ = CreateGraph(allocator_.get()); + graph_ = CreateGraph(); isa_features_ = MipsInstructionSetFeatures::FromCppDefines(); - codegen_ = new (graph_->GetArena()) mips::CodeGeneratorMIPS(graph_, - *isa_features_.get(), - CompilerOptions()); - moves_ = new (allocator_.get()) HParallelMove(allocator_.get()); + codegen_ = new (graph_->GetAllocator()) mips::CodeGeneratorMIPS(graph_, + *isa_features_.get(), + CompilerOptions()); + moves_ = new (GetAllocator()) HParallelMove(GetAllocator()); test_helper_.reset( new AssemblerTestInfrastructure(GetArchitectureString(), GetAssemblerCmdName(), @@ -47,8 +46,9 @@ class EmitSwapMipsTest : public ::testing::Test { } void TearDown() OVERRIDE { - allocator_.reset(); test_helper_.reset(); + isa_features_.reset(); + ResetPoolAndAllocator(); } // Get the typically used name for this architecture. 
@@ -104,12 +104,10 @@ class EmitSwapMipsTest : public ::testing::Test { } protected: - ArenaPool pool_; HGraph* graph_; HParallelMove* moves_; mips::CodeGeneratorMIPS* codegen_; mips::MipsAssembler* assembler_; - std::unique_ptr<ArenaAllocator> allocator_; std::unique_ptr<AssemblerTestInfrastructure> test_helper_; std::unique_ptr<const MipsInstructionSetFeatures> isa_features_; }; diff --git a/compiler/optimizing/find_loops_test.cc b/compiler/optimizing/find_loops_test.cc index bbd28f5c46..c91752855b 100644 --- a/compiler/optimizing/find_loops_test.cc +++ b/compiler/optimizing/find_loops_test.cc @@ -27,7 +27,7 @@ namespace art { -class FindLoopsTest : public CommonCompilerTest {}; +class FindLoopsTest : public OptimizingUnitTest {}; TEST_F(FindLoopsTest, CFG1) { // Constant is not used. @@ -35,9 +35,7 @@ TEST_F(FindLoopsTest, CFG1) { Instruction::CONST_4 | 0 | 0, Instruction::RETURN_VOID); - ArenaPool arena; - ArenaAllocator allocator(&arena); - HGraph* graph = CreateCFG(&allocator, data); + HGraph* graph = CreateCFG(data); for (HBasicBlock* block : graph->GetBlocks()) { ASSERT_EQ(block->GetLoopInformation(), nullptr); } @@ -48,9 +46,7 @@ TEST_F(FindLoopsTest, CFG2) { Instruction::CONST_4 | 0 | 0, Instruction::RETURN); - ArenaPool arena; - ArenaAllocator allocator(&arena); - HGraph* graph = CreateCFG(&allocator, data); + HGraph* graph = CreateCFG(data); for (HBasicBlock* block : graph->GetBlocks()) { ASSERT_EQ(block->GetLoopInformation(), nullptr); } @@ -64,9 +60,7 @@ TEST_F(FindLoopsTest, CFG3) { Instruction::GOTO | 0x100, Instruction::RETURN); - ArenaPool arena; - ArenaAllocator allocator(&arena); - HGraph* graph = CreateCFG(&allocator, data); + HGraph* graph = CreateCFG(data); for (HBasicBlock* block : graph->GetBlocks()) { ASSERT_EQ(block->GetLoopInformation(), nullptr); } @@ -81,9 +75,7 @@ TEST_F(FindLoopsTest, CFG4) { Instruction::CONST_4 | 5 << 12 | 0, Instruction::RETURN | 0 << 8); - ArenaPool arena; - ArenaAllocator allocator(&arena); - HGraph* graph = 
CreateCFG(&allocator, data); + HGraph* graph = CreateCFG(data); for (HBasicBlock* block : graph->GetBlocks()) { ASSERT_EQ(block->GetLoopInformation(), nullptr); } @@ -96,9 +88,7 @@ TEST_F(FindLoopsTest, CFG5) { Instruction::CONST_4 | 4 << 12 | 0, Instruction::RETURN | 0 << 8); - ArenaPool arena; - ArenaAllocator allocator(&arena); - HGraph* graph = CreateCFG(&allocator, data); + HGraph* graph = CreateCFG(data); for (HBasicBlock* block : graph->GetBlocks()) { ASSERT_EQ(block->GetLoopInformation(), nullptr); } @@ -142,9 +132,7 @@ TEST_F(FindLoopsTest, Loop1) { Instruction::GOTO | 0xFE00, Instruction::RETURN_VOID); - ArenaPool arena; - ArenaAllocator allocator(&arena); - HGraph* graph = CreateCFG(&allocator, data); + HGraph* graph = CreateCFG(data); TestBlock(graph, 0, false, kInvalidBlockId); // entry block TestBlock(graph, 1, false, kInvalidBlockId); // pre header @@ -170,9 +158,7 @@ TEST_F(FindLoopsTest, Loop2) { Instruction::GOTO | 0xFD00, Instruction::RETURN | 0 << 8); - ArenaPool arena; - ArenaAllocator allocator(&arena); - HGraph* graph = CreateCFG(&allocator, data); + HGraph* graph = CreateCFG(data); TestBlock(graph, 0, false, kInvalidBlockId); // entry block TestBlock(graph, 1, false, kInvalidBlockId); // goto block @@ -195,9 +181,7 @@ TEST_F(FindLoopsTest, Loop3) { Instruction::GOTO | 0xFE00, Instruction::RETURN | 0 << 8); - ArenaPool arena; - ArenaAllocator allocator(&arena); - HGraph* graph = CreateCFG(&allocator, data); + HGraph* graph = CreateCFG(data); TestBlock(graph, 0, false, kInvalidBlockId); // entry block TestBlock(graph, 1, false, kInvalidBlockId); // goto block @@ -221,9 +205,7 @@ TEST_F(FindLoopsTest, Loop4) { Instruction::GOTO | 0xFB00, Instruction::RETURN | 0 << 8); - ArenaPool arena; - ArenaAllocator allocator(&arena); - HGraph* graph = CreateCFG(&allocator, data); + HGraph* graph = CreateCFG(data); TestBlock(graph, 0, false, kInvalidBlockId); // entry block TestBlock(graph, 1, false, kInvalidBlockId); // pre header @@ -247,9 +229,7 @@ 
TEST_F(FindLoopsTest, Loop5) { Instruction::GOTO | 0xFB00, Instruction::RETURN | 0 << 8); - ArenaPool arena; - ArenaAllocator allocator(&arena); - HGraph* graph = CreateCFG(&allocator, data); + HGraph* graph = CreateCFG(data); TestBlock(graph, 0, false, kInvalidBlockId); // entry block TestBlock(graph, 1, false, kInvalidBlockId); // pre header @@ -272,9 +252,7 @@ TEST_F(FindLoopsTest, InnerLoop) { Instruction::GOTO | 0xFB00, Instruction::RETURN | 0 << 8); - ArenaPool arena; - ArenaAllocator allocator(&arena); - HGraph* graph = CreateCFG(&allocator, data); + HGraph* graph = CreateCFG(data); TestBlock(graph, 0, false, kInvalidBlockId); // entry block TestBlock(graph, 1, false, kInvalidBlockId); // pre header of outer loop @@ -303,9 +281,7 @@ TEST_F(FindLoopsTest, TwoLoops) { Instruction::GOTO | 0xFE00, // second loop Instruction::RETURN | 0 << 8); - ArenaPool arena; - ArenaAllocator allocator(&arena); - HGraph* graph = CreateCFG(&allocator, data); + HGraph* graph = CreateCFG(data); TestBlock(graph, 0, false, kInvalidBlockId); // entry block TestBlock(graph, 1, false, kInvalidBlockId); // pre header of first loop @@ -333,9 +309,7 @@ TEST_F(FindLoopsTest, NonNaturalLoop) { Instruction::GOTO | 0xFD00, Instruction::RETURN | 0 << 8); - ArenaPool arena; - ArenaAllocator allocator(&arena); - HGraph* graph = CreateCFG(&allocator, data); + HGraph* graph = CreateCFG(data); ASSERT_TRUE(graph->GetBlocks()[3]->IsLoopHeader()); HLoopInformation* info = graph->GetBlocks()[3]->GetLoopInformation(); ASSERT_EQ(1u, info->NumberOfBackEdges()); @@ -349,9 +323,7 @@ TEST_F(FindLoopsTest, DoWhileLoop) { Instruction::IF_EQ, 0xFFFF, Instruction::RETURN | 0 << 8); - ArenaPool arena; - ArenaAllocator allocator(&arena); - HGraph* graph = CreateCFG(&allocator, data); + HGraph* graph = CreateCFG(data); TestBlock(graph, 0, false, kInvalidBlockId); // entry block TestBlock(graph, 1, false, kInvalidBlockId); // pre header of first loop diff --git a/compiler/optimizing/graph_checker.h 
b/compiler/optimizing/graph_checker.h index 3060c80073..6af7b429f7 100644 --- a/compiler/optimizing/graph_checker.h +++ b/compiler/optimizing/graph_checker.h @@ -28,14 +28,14 @@ class GraphChecker : public HGraphDelegateVisitor { public: explicit GraphChecker(HGraph* graph, const char* dump_prefix = "art::GraphChecker: ") : HGraphDelegateVisitor(graph), - errors_(graph->GetArena()->Adapter(kArenaAllocGraphChecker)), + errors_(graph->GetAllocator()->Adapter(kArenaAllocGraphChecker)), dump_prefix_(dump_prefix), - seen_ids_(graph->GetArena(), + seen_ids_(graph->GetAllocator(), graph->GetCurrentInstructionId(), false, kArenaAllocGraphChecker), - blocks_storage_(graph->GetArena()->Adapter(kArenaAllocGraphChecker)), - visited_storage_(graph->GetArena(), 0u, true, kArenaAllocGraphChecker) {} + blocks_storage_(graph->GetAllocator()->Adapter(kArenaAllocGraphChecker)), + visited_storage_(graph->GetAllocator(), 0u, true, kArenaAllocGraphChecker) {} // Check the whole graph (in reverse post-order). 
void Run() { diff --git a/compiler/optimizing/graph_checker_test.cc b/compiler/optimizing/graph_checker_test.cc index 2b8231942b..9ca3e4953a 100644 --- a/compiler/optimizing/graph_checker_test.cc +++ b/compiler/optimizing/graph_checker_test.cc @@ -19,6 +19,12 @@ namespace art { +class GraphCheckerTest : public OptimizingUnitTest { + protected: + HGraph* CreateSimpleCFG(); + void TestCode(const uint16_t* data); +}; + /** * Create a simple control-flow graph composed of two blocks: * @@ -27,14 +33,14 @@ namespace art { * BasicBlock 1, pred: 0 * 1: Exit */ -HGraph* CreateSimpleCFG(ArenaAllocator* allocator) { - HGraph* graph = CreateGraph(allocator); - HBasicBlock* entry_block = new (allocator) HBasicBlock(graph); - entry_block->AddInstruction(new (allocator) HReturnVoid()); +HGraph* GraphCheckerTest::CreateSimpleCFG() { + HGraph* graph = CreateGraph(); + HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph); + entry_block->AddInstruction(new (GetAllocator()) HReturnVoid()); graph->AddBlock(entry_block); graph->SetEntryBlock(entry_block); - HBasicBlock* exit_block = new (allocator) HBasicBlock(graph); - exit_block->AddInstruction(new (allocator) HExit()); + HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph); + exit_block->AddInstruction(new (GetAllocator()) HExit()); graph->AddBlock(exit_block); graph->SetExitBlock(exit_block); entry_block->AddSuccessor(exit_block); @@ -42,10 +48,8 @@ HGraph* CreateSimpleCFG(ArenaAllocator* allocator) { return graph; } -static void TestCode(const uint16_t* data) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = CreateCFG(&allocator, data); +void GraphCheckerTest::TestCode(const uint16_t* data) { + HGraph* graph = CreateCFG(data); ASSERT_NE(graph, nullptr); GraphChecker graph_checker(graph); @@ -53,8 +57,6 @@ static void TestCode(const uint16_t* data) { ASSERT_TRUE(graph_checker.IsValid()); } -class GraphCheckerTest : public CommonCompilerTest {}; - TEST_F(GraphCheckerTest, ReturnVoid) 
{ const uint16_t data[] = ZERO_REGISTER_CODE_ITEM( Instruction::RETURN_VOID); @@ -93,10 +95,7 @@ TEST_F(GraphCheckerTest, CFG3) { // Test case with an invalid graph containing inconsistent // predecessor/successor arcs in CFG. TEST_F(GraphCheckerTest, InconsistentPredecessorsAndSuccessors) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - - HGraph* graph = CreateSimpleCFG(&allocator); + HGraph* graph = CreateSimpleCFG(); GraphChecker graph_checker(graph); graph_checker.Run(); ASSERT_TRUE(graph_checker.IsValid()); @@ -111,10 +110,7 @@ TEST_F(GraphCheckerTest, InconsistentPredecessorsAndSuccessors) { // Test case with an invalid graph containing a non-branch last // instruction in a block. TEST_F(GraphCheckerTest, BlockEndingWithNonBranchInstruction) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - - HGraph* graph = CreateSimpleCFG(&allocator); + HGraph* graph = CreateSimpleCFG(); GraphChecker graph_checker(graph); graph_checker.Run(); ASSERT_TRUE(graph_checker.IsValid()); diff --git a/compiler/optimizing/graph_test.cc b/compiler/optimizing/graph_test.cc index 28ee3a5e8b..29af808731 100644 --- a/compiler/optimizing/graph_test.cc +++ b/compiler/optimizing/graph_test.cc @@ -24,43 +24,52 @@ namespace art { -static HBasicBlock* createIfBlock(HGraph* graph, ArenaAllocator* allocator) { - HBasicBlock* if_block = new (allocator) HBasicBlock(graph); +class GraphTest : public OptimizingUnitTest { + protected: + HBasicBlock* CreateIfBlock(HGraph* graph); + HBasicBlock* CreateGotoBlock(HGraph* graph); + HBasicBlock* CreateEntryBlock(HGraph* graph); + HBasicBlock* CreateReturnBlock(HGraph* graph); + HBasicBlock* CreateExitBlock(HGraph* graph); +}; + +HBasicBlock* GraphTest::CreateIfBlock(HGraph* graph) { + HBasicBlock* if_block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(if_block); HInstruction* instr = graph->GetIntConstant(4); - HInstruction* equal = new (allocator) HEqual(instr, instr); + HInstruction* equal = new (GetAllocator()) HEqual(instr, 
instr); if_block->AddInstruction(equal); - instr = new (allocator) HIf(equal); + instr = new (GetAllocator()) HIf(equal); if_block->AddInstruction(instr); return if_block; } -static HBasicBlock* createGotoBlock(HGraph* graph, ArenaAllocator* allocator) { - HBasicBlock* block = new (allocator) HBasicBlock(graph); +HBasicBlock* GraphTest::CreateGotoBlock(HGraph* graph) { + HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(block); - HInstruction* got = new (allocator) HGoto(); + HInstruction* got = new (GetAllocator()) HGoto(); block->AddInstruction(got); return block; } -static HBasicBlock* createEntryBlock(HGraph* graph, ArenaAllocator* allocator) { - HBasicBlock* block = createGotoBlock(graph, allocator); +HBasicBlock* GraphTest::CreateEntryBlock(HGraph* graph) { + HBasicBlock* block = CreateGotoBlock(graph); graph->SetEntryBlock(block); return block; } -static HBasicBlock* createReturnBlock(HGraph* graph, ArenaAllocator* allocator) { - HBasicBlock* block = new (allocator) HBasicBlock(graph); +HBasicBlock* GraphTest::CreateReturnBlock(HGraph* graph) { + HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(block); - HInstruction* return_instr = new (allocator) HReturnVoid(); + HInstruction* return_instr = new (GetAllocator()) HReturnVoid(); block->AddInstruction(return_instr); return block; } -static HBasicBlock* createExitBlock(HGraph* graph, ArenaAllocator* allocator) { - HBasicBlock* block = new (allocator) HBasicBlock(graph); +HBasicBlock* GraphTest::CreateExitBlock(HGraph* graph) { + HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(block); - HInstruction* exit_instr = new (allocator) HExit(); + HInstruction* exit_instr = new (GetAllocator()) HExit(); block->AddInstruction(exit_instr); return block; } @@ -68,16 +77,13 @@ static HBasicBlock* createExitBlock(HGraph* graph, ArenaAllocator* allocator) { // Test that the successors of an if block stay consistent after a SimplifyCFG. 
// This test sets the false block to be the return block. -TEST(GraphTest, IfSuccessorSimpleJoinBlock1) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - - HGraph* graph = CreateGraph(&allocator); - HBasicBlock* entry_block = createEntryBlock(graph, &allocator); - HBasicBlock* if_block = createIfBlock(graph, &allocator); - HBasicBlock* if_true = createGotoBlock(graph, &allocator); - HBasicBlock* return_block = createReturnBlock(graph, &allocator); - HBasicBlock* exit_block = createExitBlock(graph, &allocator); +TEST_F(GraphTest, IfSuccessorSimpleJoinBlock1) { + HGraph* graph = CreateGraph(); + HBasicBlock* entry_block = CreateEntryBlock(graph); + HBasicBlock* if_block = CreateIfBlock(graph); + HBasicBlock* if_true = CreateGotoBlock(graph); + HBasicBlock* return_block = CreateReturnBlock(graph); + HBasicBlock* exit_block = CreateExitBlock(graph); entry_block->AddSuccessor(if_block); if_block->AddSuccessor(if_true); @@ -103,16 +109,13 @@ TEST(GraphTest, IfSuccessorSimpleJoinBlock1) { // Test that the successors of an if block stay consistent after a SimplifyCFG. // This test sets the true block to be the return block. 
-TEST(GraphTest, IfSuccessorSimpleJoinBlock2) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - - HGraph* graph = CreateGraph(&allocator); - HBasicBlock* entry_block = createEntryBlock(graph, &allocator); - HBasicBlock* if_block = createIfBlock(graph, &allocator); - HBasicBlock* if_false = createGotoBlock(graph, &allocator); - HBasicBlock* return_block = createReturnBlock(graph, &allocator); - HBasicBlock* exit_block = createExitBlock(graph, &allocator); +TEST_F(GraphTest, IfSuccessorSimpleJoinBlock2) { + HGraph* graph = CreateGraph(); + HBasicBlock* entry_block = CreateEntryBlock(graph); + HBasicBlock* if_block = CreateIfBlock(graph); + HBasicBlock* if_false = CreateGotoBlock(graph); + HBasicBlock* return_block = CreateReturnBlock(graph); + HBasicBlock* exit_block = CreateExitBlock(graph); entry_block->AddSuccessor(if_block); if_block->AddSuccessor(return_block); @@ -138,15 +141,12 @@ TEST(GraphTest, IfSuccessorSimpleJoinBlock2) { // Test that the successors of an if block stay consistent after a SimplifyCFG. // This test sets the true block to be the loop header. 
-TEST(GraphTest, IfSuccessorMultipleBackEdges1) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - - HGraph* graph = CreateGraph(&allocator); - HBasicBlock* entry_block = createEntryBlock(graph, &allocator); - HBasicBlock* if_block = createIfBlock(graph, &allocator); - HBasicBlock* return_block = createReturnBlock(graph, &allocator); - HBasicBlock* exit_block = createExitBlock(graph, &allocator); +TEST_F(GraphTest, IfSuccessorMultipleBackEdges1) { + HGraph* graph = CreateGraph(); + HBasicBlock* entry_block = CreateEntryBlock(graph); + HBasicBlock* if_block = CreateIfBlock(graph); + HBasicBlock* return_block = CreateReturnBlock(graph); + HBasicBlock* exit_block = CreateExitBlock(graph); entry_block->AddSuccessor(if_block); if_block->AddSuccessor(if_block); @@ -173,15 +173,12 @@ TEST(GraphTest, IfSuccessorMultipleBackEdges1) { // Test that the successors of an if block stay consistent after a SimplifyCFG. // This test sets the false block to be the loop header. -TEST(GraphTest, IfSuccessorMultipleBackEdges2) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - - HGraph* graph = CreateGraph(&allocator); - HBasicBlock* entry_block = createEntryBlock(graph, &allocator); - HBasicBlock* if_block = createIfBlock(graph, &allocator); - HBasicBlock* return_block = createReturnBlock(graph, &allocator); - HBasicBlock* exit_block = createExitBlock(graph, &allocator); +TEST_F(GraphTest, IfSuccessorMultipleBackEdges2) { + HGraph* graph = CreateGraph(); + HBasicBlock* entry_block = CreateEntryBlock(graph); + HBasicBlock* if_block = CreateIfBlock(graph); + HBasicBlock* return_block = CreateReturnBlock(graph); + HBasicBlock* exit_block = CreateExitBlock(graph); entry_block->AddSuccessor(if_block); if_block->AddSuccessor(return_block); @@ -208,16 +205,13 @@ TEST(GraphTest, IfSuccessorMultipleBackEdges2) { // Test that the successors of an if block stay consistent after a SimplifyCFG. // This test sets the true block to be a loop header with multiple pre headers. 
-TEST(GraphTest, IfSuccessorMultiplePreHeaders1) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - - HGraph* graph = CreateGraph(&allocator); - HBasicBlock* entry_block = createEntryBlock(graph, &allocator); - HBasicBlock* first_if_block = createIfBlock(graph, &allocator); - HBasicBlock* if_block = createIfBlock(graph, &allocator); - HBasicBlock* loop_block = createGotoBlock(graph, &allocator); - HBasicBlock* return_block = createReturnBlock(graph, &allocator); +TEST_F(GraphTest, IfSuccessorMultiplePreHeaders1) { + HGraph* graph = CreateGraph(); + HBasicBlock* entry_block = CreateEntryBlock(graph); + HBasicBlock* first_if_block = CreateIfBlock(graph); + HBasicBlock* if_block = CreateIfBlock(graph); + HBasicBlock* loop_block = CreateGotoBlock(graph); + HBasicBlock* return_block = CreateReturnBlock(graph); entry_block->AddSuccessor(first_if_block); first_if_block->AddSuccessor(if_block); @@ -247,16 +241,13 @@ TEST(GraphTest, IfSuccessorMultiplePreHeaders1) { // Test that the successors of an if block stay consistent after a SimplifyCFG. // This test sets the false block to be a loop header with multiple pre headers. 
-TEST(GraphTest, IfSuccessorMultiplePreHeaders2) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - - HGraph* graph = CreateGraph(&allocator); - HBasicBlock* entry_block = createEntryBlock(graph, &allocator); - HBasicBlock* first_if_block = createIfBlock(graph, &allocator); - HBasicBlock* if_block = createIfBlock(graph, &allocator); - HBasicBlock* loop_block = createGotoBlock(graph, &allocator); - HBasicBlock* return_block = createReturnBlock(graph, &allocator); +TEST_F(GraphTest, IfSuccessorMultiplePreHeaders2) { + HGraph* graph = CreateGraph(); + HBasicBlock* entry_block = CreateEntryBlock(graph); + HBasicBlock* first_if_block = CreateIfBlock(graph); + HBasicBlock* if_block = CreateIfBlock(graph); + HBasicBlock* loop_block = CreateGotoBlock(graph); + HBasicBlock* return_block = CreateReturnBlock(graph); entry_block->AddSuccessor(first_if_block); first_if_block->AddSuccessor(if_block); @@ -283,17 +274,14 @@ TEST(GraphTest, IfSuccessorMultiplePreHeaders2) { loop_block->GetLoopInformation()->GetPreHeader()); } -TEST(GraphTest, InsertInstructionBefore) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - - HGraph* graph = CreateGraph(&allocator); - HBasicBlock* block = createGotoBlock(graph, &allocator); +TEST_F(GraphTest, InsertInstructionBefore) { + HGraph* graph = CreateGraph(); + HBasicBlock* block = CreateGotoBlock(graph); HInstruction* got = block->GetLastInstruction(); ASSERT_TRUE(got->IsControlFlow()); // Test at the beginning of the block. - HInstruction* first_instruction = new (&allocator) HIntConstant(4); + HInstruction* first_instruction = new (GetAllocator()) HIntConstant(4); block->InsertInstructionBefore(first_instruction, got); ASSERT_NE(first_instruction->GetId(), -1); @@ -306,7 +294,7 @@ TEST(GraphTest, InsertInstructionBefore) { ASSERT_EQ(got->GetPrevious(), first_instruction); // Test in the middle of the block. 
- HInstruction* second_instruction = new (&allocator) HIntConstant(4); + HInstruction* second_instruction = new (GetAllocator()) HIntConstant(4); block->InsertInstructionBefore(second_instruction, got); ASSERT_NE(second_instruction->GetId(), -1); diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc index 8ea312d0ea..c09e5df1c0 100644 --- a/compiler/optimizing/gvn.cc +++ b/compiler/optimizing/gvn.cc @@ -566,7 +566,7 @@ HBasicBlock* GlobalValueNumberer::FindVisitedBlockWithRecyclableSet( } void GVNOptimization::Run() { - GlobalValueNumberer gvn(graph_->GetArena(), graph_, side_effects_); + GlobalValueNumberer gvn(graph_->GetAllocator(), graph_, side_effects_); gvn.Run(); } diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc index ac0dbee2c5..3bf4cc35ba 100644 --- a/compiler/optimizing/gvn_test.cc +++ b/compiler/optimizing/gvn_test.cc @@ -24,77 +24,74 @@ namespace art { -class GVNTest : public CommonCompilerTest {}; +class GVNTest : public OptimizingUnitTest {}; TEST_F(GVNTest, LocalFieldElimination) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - - HGraph* graph = CreateGraph(&allocator); - HBasicBlock* entry = new (&allocator) HBasicBlock(graph); + HGraph* graph = CreateGraph(); + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(entry); graph->SetEntryBlock(entry); - HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(), - dex::TypeIndex(0), - 0, - DataType::Type::kReference); + HInstruction* parameter = new (GetAllocator()) HParameterValue(graph->GetDexFile(), + dex::TypeIndex(0), + 0, + DataType::Type::kReference); entry->AddInstruction(parameter); - HBasicBlock* block = new (&allocator) HBasicBlock(graph); + HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(block); entry->AddSuccessor(block); - block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, - nullptr, - DataType::Type::kReference, - MemberOffset(42), - false, - 
kUnknownFieldIndex, - kUnknownClassDefIndex, - graph->GetDexFile(), - 0)); - block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, - nullptr, - DataType::Type::kReference, - MemberOffset(42), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph->GetDexFile(), - 0)); + block->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter, + nullptr, + DataType::Type::kReference, + MemberOffset(42), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph->GetDexFile(), + 0)); + block->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter, + nullptr, + DataType::Type::kReference, + MemberOffset(42), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph->GetDexFile(), + 0)); HInstruction* to_remove = block->GetLastInstruction(); - block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, - nullptr, - DataType::Type::kReference, - MemberOffset(43), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph->GetDexFile(), - 0)); + block->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter, + nullptr, + DataType::Type::kReference, + MemberOffset(43), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph->GetDexFile(), + 0)); HInstruction* different_offset = block->GetLastInstruction(); // Kill the value. 
- block->AddInstruction(new (&allocator) HInstanceFieldSet(parameter, - parameter, - nullptr, - DataType::Type::kReference, - MemberOffset(42), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph->GetDexFile(), - 0)); - block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, - nullptr, - DataType::Type::kReference, - MemberOffset(42), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph->GetDexFile(), - 0)); + block->AddInstruction(new (GetAllocator()) HInstanceFieldSet(parameter, + parameter, + nullptr, + DataType::Type::kReference, + MemberOffset(42), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph->GetDexFile(), + 0)); + block->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter, + nullptr, + DataType::Type::kReference, + MemberOffset(42), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph->GetDexFile(), + 0)); HInstruction* use_after_kill = block->GetLastInstruction(); - block->AddInstruction(new (&allocator) HExit()); + block->AddInstruction(new (GetAllocator()) HExit()); ASSERT_EQ(to_remove->GetBlock(), block); ASSERT_EQ(different_offset->GetBlock(), block); @@ -111,36 +108,33 @@ TEST_F(GVNTest, LocalFieldElimination) { } TEST_F(GVNTest, GlobalFieldElimination) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - - HGraph* graph = CreateGraph(&allocator); - HBasicBlock* entry = new (&allocator) HBasicBlock(graph); + HGraph* graph = CreateGraph(); + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(entry); graph->SetEntryBlock(entry); - HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(), - dex::TypeIndex(0), - 0, - DataType::Type::kReference); + HInstruction* parameter = new (GetAllocator()) HParameterValue(graph->GetDexFile(), + dex::TypeIndex(0), + 0, + DataType::Type::kReference); entry->AddInstruction(parameter); - HBasicBlock* block = new (&allocator) HBasicBlock(graph); + HBasicBlock* block = new (GetAllocator()) 
HBasicBlock(graph); graph->AddBlock(block); entry->AddSuccessor(block); - block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, - nullptr, - DataType::Type::kBool, - MemberOffset(42), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph->GetDexFile(), - 0)); - - block->AddInstruction(new (&allocator) HIf(block->GetLastInstruction())); - HBasicBlock* then = new (&allocator) HBasicBlock(graph); - HBasicBlock* else_ = new (&allocator) HBasicBlock(graph); - HBasicBlock* join = new (&allocator) HBasicBlock(graph); + block->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter, + nullptr, + DataType::Type::kBool, + MemberOffset(42), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph->GetDexFile(), + 0)); + + block->AddInstruction(new (GetAllocator()) HIf(block->GetLastInstruction())); + HBasicBlock* then = new (GetAllocator()) HBasicBlock(graph); + HBasicBlock* else_ = new (GetAllocator()) HBasicBlock(graph); + HBasicBlock* join = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(then); graph->AddBlock(else_); graph->AddBlock(join); @@ -150,36 +144,36 @@ TEST_F(GVNTest, GlobalFieldElimination) { then->AddSuccessor(join); else_->AddSuccessor(join); - then->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, - nullptr, - DataType::Type::kBool, - MemberOffset(42), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph->GetDexFile(), - 0)); - then->AddInstruction(new (&allocator) HGoto()); - else_->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, - nullptr, - DataType::Type::kBool, - MemberOffset(42), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph->GetDexFile(), - 0)); - else_->AddInstruction(new (&allocator) HGoto()); - join->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, - nullptr, - DataType::Type::kBool, - MemberOffset(42), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph->GetDexFile(), - 0)); - join->AddInstruction(new (&allocator) HExit()); 
+ then->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter, + nullptr, + DataType::Type::kBool, + MemberOffset(42), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph->GetDexFile(), + 0)); + then->AddInstruction(new (GetAllocator()) HGoto()); + else_->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter, + nullptr, + DataType::Type::kBool, + MemberOffset(42), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph->GetDexFile(), + 0)); + else_->AddInstruction(new (GetAllocator()) HGoto()); + join->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter, + nullptr, + DataType::Type::kBool, + MemberOffset(42), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph->GetDexFile(), + 0)); + join->AddInstruction(new (GetAllocator()) HExit()); graph->BuildDominatorTree(); SideEffectsAnalysis side_effects(graph); @@ -193,37 +187,34 @@ TEST_F(GVNTest, GlobalFieldElimination) { } TEST_F(GVNTest, LoopFieldElimination) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - - HGraph* graph = CreateGraph(&allocator); - HBasicBlock* entry = new (&allocator) HBasicBlock(graph); + HGraph* graph = CreateGraph(); + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(entry); graph->SetEntryBlock(entry); - HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(), - dex::TypeIndex(0), - 0, - DataType::Type::kReference); + HInstruction* parameter = new (GetAllocator()) HParameterValue(graph->GetDexFile(), + dex::TypeIndex(0), + 0, + DataType::Type::kReference); entry->AddInstruction(parameter); - HBasicBlock* block = new (&allocator) HBasicBlock(graph); + HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(block); entry->AddSuccessor(block); - block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, - nullptr, - DataType::Type::kBool, - MemberOffset(42), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph->GetDexFile(), - 0)); - 
block->AddInstruction(new (&allocator) HGoto()); - - HBasicBlock* loop_header = new (&allocator) HBasicBlock(graph); - HBasicBlock* loop_body = new (&allocator) HBasicBlock(graph); - HBasicBlock* exit = new (&allocator) HBasicBlock(graph); + block->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter, + nullptr, + DataType::Type::kBool, + MemberOffset(42), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph->GetDexFile(), + 0)); + block->AddInstruction(new (GetAllocator()) HGoto()); + + HBasicBlock* loop_header = new (GetAllocator()) HBasicBlock(graph); + HBasicBlock* loop_body = new (GetAllocator()) HBasicBlock(graph); + HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(loop_header); graph->AddBlock(loop_body); @@ -233,54 +224,54 @@ TEST_F(GVNTest, LoopFieldElimination) { loop_header->AddSuccessor(exit); loop_body->AddSuccessor(loop_header); - loop_header->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, - nullptr, - DataType::Type::kBool, - MemberOffset(42), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph->GetDexFile(), - 0)); + loop_header->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter, + nullptr, + DataType::Type::kBool, + MemberOffset(42), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph->GetDexFile(), + 0)); HInstruction* field_get_in_loop_header = loop_header->GetLastInstruction(); - loop_header->AddInstruction(new (&allocator) HIf(block->GetLastInstruction())); + loop_header->AddInstruction(new (GetAllocator()) HIf(block->GetLastInstruction())); // Kill inside the loop body to prevent field gets inside the loop header // and the body to be GVN'ed. 
- loop_body->AddInstruction(new (&allocator) HInstanceFieldSet(parameter, - parameter, - nullptr, - DataType::Type::kBool, - MemberOffset(42), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph->GetDexFile(), - 0)); + loop_body->AddInstruction(new (GetAllocator()) HInstanceFieldSet(parameter, + parameter, + nullptr, + DataType::Type::kBool, + MemberOffset(42), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph->GetDexFile(), + 0)); HInstruction* field_set = loop_body->GetLastInstruction(); - loop_body->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, - nullptr, - DataType::Type::kBool, - MemberOffset(42), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph->GetDexFile(), - 0)); + loop_body->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter, + nullptr, + DataType::Type::kBool, + MemberOffset(42), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph->GetDexFile(), + 0)); HInstruction* field_get_in_loop_body = loop_body->GetLastInstruction(); - loop_body->AddInstruction(new (&allocator) HGoto()); - - exit->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, - nullptr, - DataType::Type::kBool, - MemberOffset(42), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph->GetDexFile(), - 0)); + loop_body->AddInstruction(new (GetAllocator()) HGoto()); + + exit->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter, + nullptr, + DataType::Type::kBool, + MemberOffset(42), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph->GetDexFile(), + 0)); HInstruction* field_get_in_exit = exit->GetLastInstruction(); - exit->AddInstruction(new (&allocator) HExit()); + exit->AddInstruction(new (GetAllocator()) HExit()); ASSERT_EQ(field_get_in_loop_header->GetBlock(), loop_header); ASSERT_EQ(field_get_in_loop_body->GetBlock(), loop_body); @@ -315,22 +306,19 @@ TEST_F(GVNTest, LoopFieldElimination) { // Test that inner loops affect the side effects of the outer loop. 
TEST_F(GVNTest, LoopSideEffects) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - static const SideEffects kCanTriggerGC = SideEffects::CanTriggerGC(); - HGraph* graph = CreateGraph(&allocator); - HBasicBlock* entry = new (&allocator) HBasicBlock(graph); + HGraph* graph = CreateGraph(); + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(entry); graph->SetEntryBlock(entry); - HBasicBlock* outer_loop_header = new (&allocator) HBasicBlock(graph); - HBasicBlock* outer_loop_body = new (&allocator) HBasicBlock(graph); - HBasicBlock* outer_loop_exit = new (&allocator) HBasicBlock(graph); - HBasicBlock* inner_loop_header = new (&allocator) HBasicBlock(graph); - HBasicBlock* inner_loop_body = new (&allocator) HBasicBlock(graph); - HBasicBlock* inner_loop_exit = new (&allocator) HBasicBlock(graph); + HBasicBlock* outer_loop_header = new (GetAllocator()) HBasicBlock(graph); + HBasicBlock* outer_loop_body = new (GetAllocator()) HBasicBlock(graph); + HBasicBlock* outer_loop_exit = new (GetAllocator()) HBasicBlock(graph); + HBasicBlock* inner_loop_header = new (GetAllocator()) HBasicBlock(graph); + HBasicBlock* inner_loop_body = new (GetAllocator()) HBasicBlock(graph); + HBasicBlock* inner_loop_exit = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(outer_loop_header); graph->AddBlock(outer_loop_body); @@ -348,20 +336,20 @@ TEST_F(GVNTest, LoopSideEffects) { inner_loop_body->AddSuccessor(inner_loop_header); inner_loop_exit->AddSuccessor(outer_loop_header); - HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(), - dex::TypeIndex(0), - 0, - DataType::Type::kBool); + HInstruction* parameter = new (GetAllocator()) HParameterValue(graph->GetDexFile(), + dex::TypeIndex(0), + 0, + DataType::Type::kBool); entry->AddInstruction(parameter); - entry->AddInstruction(new (&allocator) HGoto()); - outer_loop_header->AddInstruction(new (&allocator) HSuspendCheck()); - outer_loop_header->AddInstruction(new (&allocator) 
HIf(parameter)); - outer_loop_body->AddInstruction(new (&allocator) HGoto()); - inner_loop_header->AddInstruction(new (&allocator) HSuspendCheck()); - inner_loop_header->AddInstruction(new (&allocator) HIf(parameter)); - inner_loop_body->AddInstruction(new (&allocator) HGoto()); - inner_loop_exit->AddInstruction(new (&allocator) HGoto()); - outer_loop_exit->AddInstruction(new (&allocator) HExit()); + entry->AddInstruction(new (GetAllocator()) HGoto()); + outer_loop_header->AddInstruction(new (GetAllocator()) HSuspendCheck()); + outer_loop_header->AddInstruction(new (GetAllocator()) HIf(parameter)); + outer_loop_body->AddInstruction(new (GetAllocator()) HGoto()); + inner_loop_header->AddInstruction(new (GetAllocator()) HSuspendCheck()); + inner_loop_header->AddInstruction(new (GetAllocator()) HIf(parameter)); + inner_loop_body->AddInstruction(new (GetAllocator()) HGoto()); + inner_loop_exit->AddInstruction(new (GetAllocator()) HGoto()); + outer_loop_exit->AddInstruction(new (GetAllocator()) HExit()); graph->BuildDominatorTree(); @@ -371,16 +359,16 @@ TEST_F(GVNTest, LoopSideEffects) { // Check that the only side effect of loops is to potentially trigger GC. { // Make one block with a side effect. - entry->AddInstruction(new (&allocator) HInstanceFieldSet(parameter, - parameter, - nullptr, - DataType::Type::kReference, - MemberOffset(42), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph->GetDexFile(), - 0)); + entry->AddInstruction(new (GetAllocator()) HInstanceFieldSet(parameter, + parameter, + nullptr, + DataType::Type::kReference, + MemberOffset(42), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph->GetDexFile(), + 0)); SideEffectsAnalysis side_effects(graph); side_effects.Run(); @@ -396,16 +384,16 @@ TEST_F(GVNTest, LoopSideEffects) { // Check that the side effects of the outer loop does not affect the inner loop. 
{ outer_loop_body->InsertInstructionBefore( - new (&allocator) HInstanceFieldSet(parameter, - parameter, - nullptr, - DataType::Type::kReference, - MemberOffset(42), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph->GetDexFile(), - 0), + new (GetAllocator()) HInstanceFieldSet(parameter, + parameter, + nullptr, + DataType::Type::kReference, + MemberOffset(42), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph->GetDexFile(), + 0), outer_loop_body->GetLastInstruction()); SideEffectsAnalysis side_effects(graph); @@ -422,16 +410,16 @@ TEST_F(GVNTest, LoopSideEffects) { { outer_loop_body->RemoveInstruction(outer_loop_body->GetFirstInstruction()); inner_loop_body->InsertInstructionBefore( - new (&allocator) HInstanceFieldSet(parameter, - parameter, - nullptr, - DataType::Type::kReference, - MemberOffset(42), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph->GetDexFile(), - 0), + new (GetAllocator()) HInstanceFieldSet(parameter, + parameter, + nullptr, + DataType::Type::kReference, + MemberOffset(42), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph->GetDexFile(), + 0), inner_loop_body->GetLastInstruction()); SideEffectsAnalysis side_effects(graph); diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc index eab17aad31..0987293e4e 100644 --- a/compiler/optimizing/induction_var_analysis.cc +++ b/compiler/optimizing/induction_var_analysis.cc @@ -100,17 +100,17 @@ static DataType::Type ImplicitConversion(DataType::Type type) { HInductionVarAnalysis::HInductionVarAnalysis(HGraph* graph) : HOptimization(graph, kInductionPassName), global_depth_(0), - stack_(graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)), + stack_(graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)), map_(std::less<HInstruction*>(), - graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)), - scc_(graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)), + 
graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)), + scc_(graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)), cycle_(std::less<HInstruction*>(), - graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)), + graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)), type_(DataType::Type::kVoid), induction_(std::less<HLoopInformation*>(), - graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)), + graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)), cycles_(std::less<HPhi*>(), - graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)) { + graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)) { } void HInductionVarAnalysis::Run() { @@ -265,7 +265,8 @@ void HInductionVarAnalysis::ClassifyNonTrivial(HLoopInformation* loop) { // Rotate proper loop-phi to front. if (size > 1) { - ArenaVector<HInstruction*> other(graph_->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)); + ArenaVector<HInstruction*> other( + graph_->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)); RotateEntryPhiFirst(loop, &scc_, &other); } @@ -991,7 +992,7 @@ void HInductionVarAnalysis::AssignInfo(HLoopInformation* loop, it = induction_.Put(loop, ArenaSafeMap<HInstruction*, InductionInfo*>( std::less<HInstruction*>(), - graph_->GetArena()->Adapter(kArenaAllocInductionVarAnalysis))); + graph_->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis))); } it->second.Put(instruction, info); } @@ -1082,7 +1083,7 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::CreateSimplifiedInv return CreateSimplifiedInvariant(kSub, b->op_b, b->op_a); } } - return new (graph_->GetArena()) InductionInfo( + return new (graph_->GetAllocator()) InductionInfo( kInvariant, op, a, b, nullptr, ImplicitConversion(b->type)); } @@ -1119,7 +1120,7 @@ HInstruction* HInductionVarAnalysis::GetShiftConstant(HLoopInformation* loop, void HInductionVarAnalysis::AssignCycle(HPhi* phi) { ArenaSet<HInstruction*>* set = &cycles_.Put(phi, 
ArenaSet<HInstruction*>( - graph_->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)))->second; + graph_->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)))->second; for (HInstruction* i : scc_) { set->insert(i); } diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h index 421b3ab9d0..a2d302ae81 100644 --- a/compiler/optimizing/induction_var_analysis.h +++ b/compiler/optimizing/induction_var_analysis.h @@ -129,7 +129,7 @@ class HInductionVarAnalysis : public HOptimization { InductionInfo* CreateInvariantFetch(HInstruction* f) { DCHECK(f != nullptr); - return new (graph_->GetArena()) + return new (graph_->GetAllocator()) InductionInfo(kInvariant, kFetch, nullptr, nullptr, f, f->GetType()); } @@ -138,7 +138,7 @@ class HInductionVarAnalysis : public HOptimization { InductionInfo* b, DataType::Type type) { DCHECK(a != nullptr && b != nullptr); - return new (graph_->GetArena()) InductionInfo(kInvariant, op, a, b, nullptr, type); + return new (graph_->GetAllocator()) InductionInfo(kInvariant, op, a, b, nullptr, type); } InductionInfo* CreateInduction(InductionClass ic, @@ -148,7 +148,7 @@ class HInductionVarAnalysis : public HOptimization { HInstruction* f, DataType::Type type) { DCHECK(a != nullptr && b != nullptr); - return new (graph_->GetArena()) InductionInfo(ic, op, a, b, f, type); + return new (graph_->GetAllocator()) InductionInfo(ic, op, a, b, f, type); } // Methods for analysis. diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc index 53c8044a0b..4c11ad4643 100644 --- a/compiler/optimizing/induction_var_analysis_test.cc +++ b/compiler/optimizing/induction_var_analysis_test.cc @@ -27,12 +27,10 @@ namespace art { /** * Fixture class for the InductionVarAnalysis tests. 
*/ -class InductionVarAnalysisTest : public CommonCompilerTest { +class InductionVarAnalysisTest : public OptimizingUnitTest { public: InductionVarAnalysisTest() - : pool_(), - allocator_(&pool_), - iva_(nullptr), + : iva_(nullptr), entry_(nullptr), return_(nullptr), exit_(nullptr), @@ -44,7 +42,7 @@ class InductionVarAnalysisTest : public CommonCompilerTest { constant100_(nullptr), constantm1_(nullptr), float_constant0_(nullptr) { - graph_ = CreateGraph(&allocator_); + graph_ = CreateGraph(); } ~InductionVarAnalysisTest() { } @@ -52,15 +50,15 @@ class InductionVarAnalysisTest : public CommonCompilerTest { // Builds single for-loop at depth d. void BuildForLoop(int d, int n) { ASSERT_LT(d, n); - loop_preheader_[d] = new (&allocator_) HBasicBlock(graph_); + loop_preheader_[d] = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(loop_preheader_[d]); - loop_header_[d] = new (&allocator_) HBasicBlock(graph_); + loop_header_[d] = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(loop_header_[d]); loop_preheader_[d]->AddSuccessor(loop_header_[d]); if (d < (n - 1)) { BuildForLoop(d + 1, n); } - loop_body_[d] = new (&allocator_) HBasicBlock(graph_); + loop_body_[d] = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(loop_body_[d]); loop_body_[d]->AddSuccessor(loop_header_[d]); if (d < (n - 1)) { @@ -79,12 +77,12 @@ class InductionVarAnalysisTest : public CommonCompilerTest { graph_->SetNumberOfVRegs(n + 3); // Build basic blocks with entry, nested loop, exit. 
- entry_ = new (&allocator_) HBasicBlock(graph_); + entry_ = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(entry_); BuildForLoop(0, n); - return_ = new (&allocator_) HBasicBlock(graph_); + return_ = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(return_); - exit_ = new (&allocator_) HBasicBlock(graph_); + exit_ = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(exit_); entry_->AddSuccessor(loop_preheader_[0]); loop_header_[0]->AddSuccessor(return_); @@ -93,7 +91,7 @@ class InductionVarAnalysisTest : public CommonCompilerTest { graph_->SetExitBlock(exit_); // Provide entry and exit instructions. - parameter_ = new (&allocator_) HParameterValue( + parameter_ = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference, true); entry_->AddInstruction(parameter_); constant0_ = graph_->GetIntConstant(0); @@ -103,20 +101,20 @@ class InductionVarAnalysisTest : public CommonCompilerTest { constant100_ = graph_->GetIntConstant(100); constantm1_ = graph_->GetIntConstant(-1); float_constant0_ = graph_->GetFloatConstant(0.0f); - return_->AddInstruction(new (&allocator_) HReturnVoid()); - exit_->AddInstruction(new (&allocator_) HExit()); + return_->AddInstruction(new (GetAllocator()) HReturnVoid()); + exit_->AddInstruction(new (GetAllocator()) HExit()); // Provide loop instructions. 
for (int d = 0; d < n; d++) { - basic_[d] = new (&allocator_) HPhi(&allocator_, d, 0, DataType::Type::kInt32); - loop_preheader_[d]->AddInstruction(new (&allocator_) HGoto()); + basic_[d] = new (GetAllocator()) HPhi(GetAllocator(), d, 0, DataType::Type::kInt32); + loop_preheader_[d]->AddInstruction(new (GetAllocator()) HGoto()); loop_header_[d]->AddPhi(basic_[d]); - HInstruction* compare = new (&allocator_) HLessThan(basic_[d], constant100_); + HInstruction* compare = new (GetAllocator()) HLessThan(basic_[d], constant100_); loop_header_[d]->AddInstruction(compare); - loop_header_[d]->AddInstruction(new (&allocator_) HIf(compare)); - increment_[d] = new (&allocator_) HAdd(DataType::Type::kInt32, basic_[d], constant1_); + loop_header_[d]->AddInstruction(new (GetAllocator()) HIf(compare)); + increment_[d] = new (GetAllocator()) HAdd(DataType::Type::kInt32, basic_[d], constant1_); loop_body_[d]->AddInstruction(increment_[d]); - loop_body_[d]->AddInstruction(new (&allocator_) HGoto()); + loop_body_[d]->AddInstruction(new (GetAllocator()) HGoto()); basic_[d]->AddInput(constant0_); basic_[d]->AddInput(increment_[d]); @@ -125,9 +123,9 @@ class InductionVarAnalysisTest : public CommonCompilerTest { // Builds if-statement at depth d. 
HPhi* BuildIf(int d, HBasicBlock** ifT, HBasicBlock** ifF) { - HBasicBlock* cond = new (&allocator_) HBasicBlock(graph_); - HBasicBlock* ifTrue = new (&allocator_) HBasicBlock(graph_); - HBasicBlock* ifFalse = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* cond = new (GetAllocator()) HBasicBlock(graph_); + HBasicBlock* ifTrue = new (GetAllocator()) HBasicBlock(graph_); + HBasicBlock* ifFalse = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(cond); graph_->AddBlock(ifTrue); graph_->AddBlock(ifFalse); @@ -137,11 +135,11 @@ class InductionVarAnalysisTest : public CommonCompilerTest { cond->AddSuccessor(ifFalse); ifTrue->AddSuccessor(loop_body_[d]); ifFalse->AddSuccessor(loop_body_[d]); - cond->AddInstruction(new (&allocator_) HIf(parameter_)); + cond->AddInstruction(new (GetAllocator()) HIf(parameter_)); *ifT = ifTrue; *ifF = ifFalse; - HPhi* select_phi = new (&allocator_) HPhi(&allocator_, -1, 0, DataType::Type::kInt32); + HPhi* select_phi = new (GetAllocator()) HPhi(GetAllocator(), -1, 0, DataType::Type::kInt32); loop_body_[d]->AddPhi(select_phi); return select_phi; } @@ -154,7 +152,7 @@ class InductionVarAnalysisTest : public CommonCompilerTest { // Inserts a phi to loop header at depth d and returns it. HPhi* InsertLoopPhi(int vreg, int d) { - HPhi* phi = new (&allocator_) HPhi(&allocator_, vreg, 0, DataType::Type::kInt32); + HPhi* phi = new (GetAllocator()) HPhi(GetAllocator(), vreg, 0, DataType::Type::kInt32); loop_header_[d]->AddPhi(phi); return phi; } @@ -164,7 +162,7 @@ class InductionVarAnalysisTest : public CommonCompilerTest { HInstruction* InsertArrayStore(HInstruction* subscript, int d) { // ArraySet is given a float value in order to avoid SsaBuilder typing // it from the array's non-existent reference type info. 
- return InsertInstruction(new (&allocator_) HArraySet( + return InsertInstruction(new (GetAllocator()) HArraySet( parameter_, subscript, float_constant0_, DataType::Type::kFloat32, 0), d); } @@ -197,13 +195,11 @@ class InductionVarAnalysisTest : public CommonCompilerTest { // Performs InductionVarAnalysis (after proper set up). void PerformInductionVarAnalysis() { graph_->BuildDominatorTree(); - iva_ = new (&allocator_) HInductionVarAnalysis(graph_); + iva_ = new (GetAllocator()) HInductionVarAnalysis(graph_); iva_->Run(); } // General building fields. - ArenaPool pool_; - ArenaAllocator allocator_; HGraph* graph_; HInductionVarAnalysis* iva_; @@ -286,15 +282,15 @@ TEST_F(InductionVarAnalysisTest, FindDerivedInduction) { // } BuildLoopNest(1); HInstruction* add = InsertInstruction( - new (&allocator_) HAdd(DataType::Type::kInt32, constant100_, basic_[0]), 0); + new (GetAllocator()) HAdd(DataType::Type::kInt32, constant100_, basic_[0]), 0); HInstruction* sub = InsertInstruction( - new (&allocator_) HSub(DataType::Type::kInt32, constant100_, basic_[0]), 0); + new (GetAllocator()) HSub(DataType::Type::kInt32, constant100_, basic_[0]), 0); HInstruction* mul = InsertInstruction( - new (&allocator_) HMul(DataType::Type::kInt32, constant100_, basic_[0]), 0); + new (GetAllocator()) HMul(DataType::Type::kInt32, constant100_, basic_[0]), 0); HInstruction* shl = InsertInstruction( - new (&allocator_) HShl(DataType::Type::kInt32, basic_[0], constant1_), 0); + new (GetAllocator()) HShl(DataType::Type::kInt32, basic_[0], constant1_), 0); HInstruction* neg = InsertInstruction( - new (&allocator_) HNeg(DataType::Type::kInt32, basic_[0]), 0); + new (GetAllocator()) HNeg(DataType::Type::kInt32, basic_[0]), 0); PerformInductionVarAnalysis(); EXPECT_STREQ("((1) * i + (100)):Int32", GetInductionInfo(add, 0).c_str()); @@ -318,10 +314,10 @@ TEST_F(InductionVarAnalysisTest, FindChainInduction) { k_header->AddInput(constant0_); HInstruction* add = InsertInstruction( - new (&allocator_) 
HAdd(DataType::Type::kInt32, k_header, constant100_), 0); + new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant100_), 0); HInstruction* store1 = InsertArrayStore(add, 0); HInstruction* sub = InsertInstruction( - new (&allocator_) HSub(DataType::Type::kInt32, add, constant1_), 0); + new (GetAllocator()) HSub(DataType::Type::kInt32, add, constant1_), 0); HInstruction* store2 = InsertArrayStore(sub, 0); k_header->AddInput(sub); PerformInductionVarAnalysis(); @@ -351,11 +347,11 @@ TEST_F(InductionVarAnalysisTest, FindTwoWayBasicInduction) { HPhi* k_body = BuildIf(0, &ifTrue, &ifFalse); // True-branch. - HInstruction* inc1 = new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant1_); + HInstruction* inc1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant1_); ifTrue->AddInstruction(inc1); k_body->AddInput(inc1); // False-branch. - HInstruction* inc2 = new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant1_); + HInstruction* inc2 = new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant1_); ifFalse->AddInstruction(inc2); k_body->AddInput(inc2); // Merge over a phi. @@ -384,11 +380,11 @@ TEST_F(InductionVarAnalysisTest, FindTwoWayDerivedInduction) { HPhi* k = BuildIf(0, &ifTrue, &ifFalse); // True-branch. - HInstruction* inc1 = new (&allocator_) HAdd(DataType::Type::kInt32, basic_[0], constant1_); + HInstruction* inc1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, basic_[0], constant1_); ifTrue->AddInstruction(inc1); k->AddInput(inc1); // False-branch. - HInstruction* inc2 = new (&allocator_) HAdd(DataType::Type::kInt32, basic_[0], constant1_); + HInstruction* inc2 = new (GetAllocator()) HAdd(DataType::Type::kInt32, basic_[0], constant1_); ifFalse->AddInstruction(inc2); k->AddInput(inc2); // Merge over a phi. 
@@ -412,11 +408,11 @@ TEST_F(InductionVarAnalysisTest, AddLinear) { BuildLoopNest(1); HInstruction* add1 = InsertInstruction( - new (&allocator_) HAdd(DataType::Type::kInt32, basic_[0], basic_[0]), 0); + new (GetAllocator()) HAdd(DataType::Type::kInt32, basic_[0], basic_[0]), 0); HInstruction* add2 = InsertInstruction( - new (&allocator_) HAdd(DataType::Type::kInt32, constant7_, basic_[0]), 0); + new (GetAllocator()) HAdd(DataType::Type::kInt32, constant7_, basic_[0]), 0); HInstruction* add3 = InsertInstruction( - new (&allocator_) HAdd(DataType::Type::kInt32, add1, add2), 0); + new (GetAllocator()) HAdd(DataType::Type::kInt32, add1, add2), 0); PerformInductionVarAnalysis(); EXPECT_STREQ("((1) * i + (0)):Int32", GetInductionInfo(basic_[0], 0).c_str()); @@ -438,11 +434,11 @@ TEST_F(InductionVarAnalysisTest, FindPolynomialInduction) { k_header->AddInput(constant1_); HInstruction* mul = InsertInstruction( - new (&allocator_) HMul(DataType::Type::kInt32, basic_[0], constant2_), 0); + new (GetAllocator()) HMul(DataType::Type::kInt32, basic_[0], constant2_), 0); HInstruction* add = InsertInstruction( - new (&allocator_) HAdd(DataType::Type::kInt32, constant100_, mul), 0); + new (GetAllocator()) HAdd(DataType::Type::kInt32, constant100_, mul), 0); HInstruction* pol = InsertInstruction( - new (&allocator_) HAdd(DataType::Type::kInt32, add, k_header), 0); + new (GetAllocator()) HAdd(DataType::Type::kInt32, add, k_header), 0); k_header->AddInput(pol); PerformInductionVarAnalysis(); @@ -469,17 +465,17 @@ TEST_F(InductionVarAnalysisTest, FindPolynomialInductionAndDerived) { k_header->AddInput(constant1_); HInstruction* add = InsertInstruction( - new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant100_), 0); + new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant100_), 0); HInstruction* sub = InsertInstruction( - new (&allocator_) HSub(DataType::Type::kInt32, k_header, constant1_), 0); + new (GetAllocator()) HSub(DataType::Type::kInt32, k_header, 
constant1_), 0); HInstruction* neg = InsertInstruction( - new (&allocator_) HNeg(DataType::Type::kInt32, sub), 0); + new (GetAllocator()) HNeg(DataType::Type::kInt32, sub), 0); HInstruction* mul = InsertInstruction( - new (&allocator_) HMul(DataType::Type::kInt32, k_header, constant2_), 0); + new (GetAllocator()) HMul(DataType::Type::kInt32, k_header, constant2_), 0); HInstruction* shl = InsertInstruction( - new (&allocator_) HShl(DataType::Type::kInt32, k_header, constant2_), 0); + new (GetAllocator()) HShl(DataType::Type::kInt32, k_header, constant2_), 0); HInstruction* pol = InsertInstruction( - new (&allocator_) HAdd(DataType::Type::kInt32, k_header, basic_[0]), 0); + new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, basic_[0]), 0); k_header->AddInput(pol); PerformInductionVarAnalysis(); @@ -512,11 +508,11 @@ TEST_F(InductionVarAnalysisTest, AddPolynomial) { k_header->AddInput(constant7_); HInstruction* add1 = InsertInstruction( - new (&allocator_) HAdd(DataType::Type::kInt32, k_header, k_header), 0); + new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, k_header), 0); HInstruction* add2 = InsertInstruction( - new (&allocator_) HAdd(DataType::Type::kInt32, add1, k_header), 0); + new (GetAllocator()) HAdd(DataType::Type::kInt32, add1, k_header), 0); HInstruction* add3 = InsertInstruction( - new (&allocator_) HAdd(DataType::Type::kInt32, k_header, basic_[0]), 0); + new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, basic_[0]), 0); k_header->AddInput(add3); PerformInductionVarAnalysis(); @@ -542,7 +538,7 @@ TEST_F(InductionVarAnalysisTest, FindGeometricMulInduction) { k_header->AddInput(constant1_); HInstruction* mul = InsertInstruction( - new (&allocator_) HMul(DataType::Type::kInt32, k_header, constant100_), 0); + new (GetAllocator()) HMul(DataType::Type::kInt32, k_header, constant100_), 0); k_header->AddInput(mul); PerformInductionVarAnalysis(); @@ -567,19 +563,19 @@ TEST_F(InductionVarAnalysisTest, 
FindGeometricShlInductionAndDerived) { k_header->AddInput(constant1_); HInstruction* add1 = InsertInstruction( - new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant1_), 0); + new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant1_), 0); HInstruction* shl1 = InsertInstruction( - new (&allocator_) HShl(DataType::Type::kInt32, k_header, constant1_), 0); + new (GetAllocator()) HShl(DataType::Type::kInt32, k_header, constant1_), 0); HInstruction* add2 = InsertInstruction( - new (&allocator_) HAdd(DataType::Type::kInt32, shl1, constant100_), 0); + new (GetAllocator()) HAdd(DataType::Type::kInt32, shl1, constant100_), 0); HInstruction* sub = InsertInstruction( - new (&allocator_) HSub(DataType::Type::kInt32, shl1, constant1_), 0); + new (GetAllocator()) HSub(DataType::Type::kInt32, shl1, constant1_), 0); HInstruction* neg = InsertInstruction( - new (&allocator_) HNeg(DataType::Type::kInt32, sub), 0); + new (GetAllocator()) HNeg(DataType::Type::kInt32, sub), 0); HInstruction* mul = InsertInstruction( - new (&allocator_) HMul(DataType::Type::kInt32, shl1, constant2_), 0); + new (GetAllocator()) HMul(DataType::Type::kInt32, shl1, constant2_), 0); HInstruction* shl2 = InsertInstruction( - new (&allocator_) HShl(DataType::Type::kInt32, shl1, constant2_), 0); + new (GetAllocator()) HShl(DataType::Type::kInt32, shl1, constant2_), 0); k_header->AddInput(shl1); PerformInductionVarAnalysis(); @@ -610,17 +606,17 @@ TEST_F(InductionVarAnalysisTest, FindGeometricDivInductionAndDerived) { k_header->AddInput(constant1_); HInstruction* add = InsertInstruction( - new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant100_), 0); + new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant100_), 0); HInstruction* sub = InsertInstruction( - new (&allocator_) HSub(DataType::Type::kInt32, k_header, constant1_), 0); + new (GetAllocator()) HSub(DataType::Type::kInt32, k_header, constant1_), 0); HInstruction* neg = InsertInstruction( - new 
(&allocator_) HNeg(DataType::Type::kInt32, sub), 0); + new (GetAllocator()) HNeg(DataType::Type::kInt32, sub), 0); HInstruction* mul = InsertInstruction( - new (&allocator_) HMul(DataType::Type::kInt32, k_header, constant2_), 0); + new (GetAllocator()) HMul(DataType::Type::kInt32, k_header, constant2_), 0); HInstruction* shl = InsertInstruction( - new (&allocator_) HShl(DataType::Type::kInt32, k_header, constant2_), 0); + new (GetAllocator()) HShl(DataType::Type::kInt32, k_header, constant2_), 0); HInstruction* div = InsertInstruction( - new (&allocator_) HDiv(DataType::Type::kInt32, k_header, constant100_, kNoDexPc), 0); + new (GetAllocator()) HDiv(DataType::Type::kInt32, k_header, constant100_, kNoDexPc), 0); k_header->AddInput(div); PerformInductionVarAnalysis(); @@ -645,7 +641,7 @@ TEST_F(InductionVarAnalysisTest, FindGeometricShrInduction) { k_header->AddInput(constant100_); HInstruction* shr = InsertInstruction( - new (&allocator_) HShr(DataType::Type::kInt32, k_header, constant1_), 0); + new (GetAllocator()) HShr(DataType::Type::kInt32, k_header, constant1_), 0); k_header->AddInput(shr); PerformInductionVarAnalysis(); @@ -665,7 +661,7 @@ TEST_F(InductionVarAnalysisTest, FindNotGeometricShrInduction) { k_header->AddInput(constantm1_); HInstruction* shr = InsertInstruction( - new (&allocator_) HShr(DataType::Type::kInt32, k_header, constant1_), 0); + new (GetAllocator()) HShr(DataType::Type::kInt32, k_header, constant1_), 0); k_header->AddInput(shr); PerformInductionVarAnalysis(); @@ -689,17 +685,17 @@ TEST_F(InductionVarAnalysisTest, FindRemWrapAroundInductionAndDerived) { k_header->AddInput(constant100_); HInstruction* add = InsertInstruction( - new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant100_), 0); + new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant100_), 0); HInstruction* sub = InsertInstruction( - new (&allocator_) HSub(DataType::Type::kInt32, k_header, constant1_), 0); + new (GetAllocator()) 
HSub(DataType::Type::kInt32, k_header, constant1_), 0); HInstruction* neg = InsertInstruction( - new (&allocator_) HNeg(DataType::Type::kInt32, sub), 0); + new (GetAllocator()) HNeg(DataType::Type::kInt32, sub), 0); HInstruction* mul = InsertInstruction( - new (&allocator_) HMul(DataType::Type::kInt32, k_header, constant2_), 0); + new (GetAllocator()) HMul(DataType::Type::kInt32, k_header, constant2_), 0); HInstruction* shl = InsertInstruction( - new (&allocator_) HShl(DataType::Type::kInt32, k_header, constant2_), 0); + new (GetAllocator()) HShl(DataType::Type::kInt32, k_header, constant2_), 0); HInstruction* rem = InsertInstruction( - new (&allocator_) HRem(DataType::Type::kInt32, k_header, constant7_, kNoDexPc), 0); + new (GetAllocator()) HRem(DataType::Type::kInt32, k_header, constant7_, kNoDexPc), 0); k_header->AddInput(rem); PerformInductionVarAnalysis(); @@ -731,7 +727,7 @@ TEST_F(InductionVarAnalysisTest, FindFirstOrderWrapAroundInduction) { HInstruction* store = InsertArrayStore(k_header, 0); HInstruction* sub = InsertInstruction( - new (&allocator_) HSub(DataType::Type::kInt32, constant100_, basic_[0]), 0); + new (GetAllocator()) HSub(DataType::Type::kInt32, constant100_, basic_[0]), 0); k_header->AddInput(sub); PerformInductionVarAnalysis(); @@ -760,7 +756,7 @@ TEST_F(InductionVarAnalysisTest, FindSecondOrderWrapAroundInduction) { HInstruction* store = InsertArrayStore(k_header, 0); k_header->AddInput(t); HInstruction* sub = InsertInstruction( - new (&allocator_) HSub(DataType::Type::kInt32, constant100_, basic_[0], 0), 0); + new (GetAllocator()) HSub(DataType::Type::kInt32, constant100_, basic_[0], 0), 0); t->AddInput(sub); PerformInductionVarAnalysis(); @@ -785,19 +781,19 @@ TEST_F(InductionVarAnalysisTest, FindWrapAroundDerivedInduction) { k_header->AddInput(constant0_); HInstruction* add = InsertInstruction( - new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant100_), 0); + new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, 
constant100_), 0); HInstruction* sub = InsertInstruction( - new (&allocator_) HSub(DataType::Type::kInt32, k_header, constant100_), 0); + new (GetAllocator()) HSub(DataType::Type::kInt32, k_header, constant100_), 0); HInstruction* mul = InsertInstruction( - new (&allocator_) HMul(DataType::Type::kInt32, k_header, constant100_), 0); + new (GetAllocator()) HMul(DataType::Type::kInt32, k_header, constant100_), 0); HInstruction* shl1 = InsertInstruction( - new (&allocator_) HShl(DataType::Type::kInt32, k_header, constant1_), 0); + new (GetAllocator()) HShl(DataType::Type::kInt32, k_header, constant1_), 0); HInstruction* neg1 = InsertInstruction( - new (&allocator_) HNeg(DataType::Type::kInt32, k_header), 0); + new (GetAllocator()) HNeg(DataType::Type::kInt32, k_header), 0); HInstruction* shl2 = InsertInstruction( - new (&allocator_) HShl(DataType::Type::kInt32, basic_[0], constant1_), 0); + new (GetAllocator()) HShl(DataType::Type::kInt32, basic_[0], constant1_), 0); HInstruction* neg2 = InsertInstruction( - new (&allocator_) HNeg(DataType::Type::kInt32, shl2), 0); + new (GetAllocator()) HNeg(DataType::Type::kInt32, shl2), 0); k_header->AddInput(shl2); PerformInductionVarAnalysis(); @@ -856,7 +852,7 @@ TEST_F(InductionVarAnalysisTest, FindIdiomaticPeriodicInduction) { HInstruction* store = InsertArrayStore(k_header, 0); HInstruction* sub = InsertInstruction( - new (&allocator_) HSub(DataType::Type::kInt32, constant1_, k_header), 0); + new (GetAllocator()) HSub(DataType::Type::kInt32, constant1_, k_header), 0); k_header->AddInput(sub); PerformInductionVarAnalysis(); @@ -877,7 +873,7 @@ TEST_F(InductionVarAnalysisTest, FindXorPeriodicInduction) { HInstruction* store = InsertArrayStore(k_header, 0); HInstruction* x = InsertInstruction( - new (&allocator_) HXor(DataType::Type::kInt32, k_header, constant1_), 0); + new (GetAllocator()) HXor(DataType::Type::kInt32, k_header, constant1_), 0); k_header->AddInput(x); PerformInductionVarAnalysis(); @@ -896,7 +892,7 @@ 
TEST_F(InductionVarAnalysisTest, FindXorConstantLeftPeriodicInduction) { k_header->AddInput(constant1_); HInstruction* x = InsertInstruction( - new (&allocator_) HXor(DataType::Type::kInt32, constant1_, k_header), 0); + new (GetAllocator()) HXor(DataType::Type::kInt32, constant1_, k_header), 0); k_header->AddInput(x); PerformInductionVarAnalysis(); @@ -915,7 +911,7 @@ TEST_F(InductionVarAnalysisTest, FindXor100PeriodicInduction) { k_header->AddInput(constant1_); HInstruction* x = InsertInstruction( - new (&allocator_) HXor(DataType::Type::kInt32, k_header, constant100_), 0); + new (GetAllocator()) HXor(DataType::Type::kInt32, k_header, constant100_), 0); k_header->AddInput(x); PerformInductionVarAnalysis(); @@ -933,7 +929,7 @@ TEST_F(InductionVarAnalysisTest, FindBooleanEqPeriodicInduction) { HPhi* k_header = InsertLoopPhi(0, 0); k_header->AddInput(constant0_); - HInstruction* x = InsertInstruction(new (&allocator_) HEqual(k_header, constant0_), 0); + HInstruction* x = InsertInstruction(new (GetAllocator()) HEqual(k_header, constant0_), 0); k_header->AddInput(x); PerformInductionVarAnalysis(); @@ -951,7 +947,7 @@ TEST_F(InductionVarAnalysisTest, FindBooleanEqConstantLeftPeriodicInduction) { HPhi* k_header = InsertLoopPhi(0, 0); k_header->AddInput(constant0_); - HInstruction* x = InsertInstruction(new (&allocator_) HEqual(constant0_, k_header), 0); + HInstruction* x = InsertInstruction(new (GetAllocator()) HEqual(constant0_, k_header), 0); k_header->AddInput(x); PerformInductionVarAnalysis(); @@ -969,7 +965,7 @@ TEST_F(InductionVarAnalysisTest, FindBooleanNePeriodicInduction) { HPhi* k_header = InsertLoopPhi(0, 0); k_header->AddInput(constant0_); - HInstruction* x = InsertInstruction(new (&allocator_) HNotEqual(k_header, constant1_), 0); + HInstruction* x = InsertInstruction(new (GetAllocator()) HNotEqual(k_header, constant1_), 0); k_header->AddInput(x); PerformInductionVarAnalysis(); @@ -987,7 +983,7 @@ TEST_F(InductionVarAnalysisTest, 
FindBooleanNeConstantLeftPeriodicInduction) { HPhi* k_header = InsertLoopPhi(0, 0); k_header->AddInput(constant0_); - HInstruction* x = InsertInstruction(new (&allocator_) HNotEqual(constant1_, k_header), 0); + HInstruction* x = InsertInstruction(new (GetAllocator()) HNotEqual(constant1_, k_header), 0); k_header->AddInput(x); PerformInductionVarAnalysis(); @@ -1012,19 +1008,19 @@ TEST_F(InductionVarAnalysisTest, FindDerivedPeriodicInduction) { k_header->AddInput(constant0_); HInstruction* neg1 = InsertInstruction( - new (&allocator_) HNeg(DataType::Type::kInt32, k_header), 0); + new (GetAllocator()) HNeg(DataType::Type::kInt32, k_header), 0); HInstruction* idiom = InsertInstruction( - new (&allocator_) HSub(DataType::Type::kInt32, constant1_, k_header), 0); + new (GetAllocator()) HSub(DataType::Type::kInt32, constant1_, k_header), 0); HInstruction* add = InsertInstruction( - new (&allocator_) HAdd(DataType::Type::kInt32, idiom, constant100_), 0); + new (GetAllocator()) HAdd(DataType::Type::kInt32, idiom, constant100_), 0); HInstruction* sub = InsertInstruction( - new (&allocator_) HSub(DataType::Type::kInt32, idiom, constant100_), 0); + new (GetAllocator()) HSub(DataType::Type::kInt32, idiom, constant100_), 0); HInstruction* mul = InsertInstruction( - new (&allocator_) HMul(DataType::Type::kInt32, idiom, constant100_), 0); + new (GetAllocator()) HMul(DataType::Type::kInt32, idiom, constant100_), 0); HInstruction* shl = InsertInstruction( - new (&allocator_) HShl(DataType::Type::kInt32, idiom, constant1_), 0); + new (GetAllocator()) HShl(DataType::Type::kInt32, idiom, constant1_), 0); HInstruction* neg2 = InsertInstruction( - new (&allocator_) HNeg(DataType::Type::kInt32, idiom), 0); + new (GetAllocator()) HNeg(DataType::Type::kInt32, idiom), 0); k_header->AddInput(idiom); PerformInductionVarAnalysis(); @@ -1057,7 +1053,7 @@ TEST_F(InductionVarAnalysisTest, FindDeepLoopInduction) { } HInstruction* inc = InsertInstruction( - new (&allocator_) 
HAdd(DataType::Type::kInt32, constant1_, k_header[9]), 9); + new (GetAllocator()) HAdd(DataType::Type::kInt32, constant1_, k_header[9]), 9); HInstruction* store = InsertArrayStore(inc, 9); for (int d = 0; d < 10; d++) { @@ -1091,7 +1087,7 @@ TEST_F(InductionVarAnalysisTest, ByteInductionIntLoopControl) { // } BuildLoopNest(1); HInstruction* conv = InsertInstruction( - new (&allocator_) HTypeConversion(DataType::Type::kInt8, basic_[0], kNoDexPc), 0); + new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, basic_[0], kNoDexPc), 0); HInstruction* store1 = InsertArrayStore(conv, 0); HInstruction* store2 = InsertArrayStore(basic_[0], 0); PerformInductionVarAnalysis(); @@ -1122,10 +1118,10 @@ TEST_F(InductionVarAnalysisTest, ByteInductionDerivedIntLoopControl) { // } BuildLoopNest(1); HInstruction* conv = InsertInstruction( - new (&allocator_) HTypeConversion(DataType::Type::kInt8, basic_[0], kNoDexPc), 0); + new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, basic_[0], kNoDexPc), 0); HInstruction* store1 = InsertArrayStore(conv, 0); HInstruction* add = InsertInstruction( - new (&allocator_) HAdd(DataType::Type::kInt32, conv, constant1_), 0); + new (GetAllocator()) HAdd(DataType::Type::kInt32, conv, constant1_), 0); HInstruction* store2 = InsertArrayStore(add, 0); PerformInductionVarAnalysis(); @@ -1152,9 +1148,9 @@ TEST_F(InductionVarAnalysisTest, ByteInduction) { k_header->AddInput(graph_->GetIntConstant(-128)); HInstruction* add = InsertInstruction( - new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant1_), 0); + new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant1_), 0); HInstruction* conv = InsertInstruction( - new (&allocator_) HTypeConversion(DataType::Type::kInt8, add, kNoDexPc), 0); + new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, add, kNoDexPc), 0); k_header->AddInput(conv); PerformInductionVarAnalysis(); @@ -1180,9 +1176,9 @@ TEST_F(InductionVarAnalysisTest, NoByteInduction1) { 
k_header->AddInput(graph_->GetIntConstant(-129)); HInstruction* add = InsertInstruction( - new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant1_), 0); + new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant1_), 0); HInstruction* conv = InsertInstruction( - new (&allocator_) HTypeConversion(DataType::Type::kInt8, add, kNoDexPc), 0); + new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, add, kNoDexPc), 0); k_header->AddInput(conv); PerformInductionVarAnalysis(); @@ -1202,9 +1198,9 @@ TEST_F(InductionVarAnalysisTest, NoByteInduction2) { k_header->AddInput(constant0_); HInstruction* conv = InsertInstruction( - new (&allocator_) HTypeConversion(DataType::Type::kInt8, k_header, kNoDexPc), 0); + new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, k_header, kNoDexPc), 0); HInstruction* add = InsertInstruction( - new (&allocator_) HAdd(DataType::Type::kInt32, conv, constant1_), 0); + new (GetAllocator()) HAdd(DataType::Type::kInt32, conv, constant1_), 0); k_header->AddInput(add); PerformInductionVarAnalysis(); @@ -1221,7 +1217,7 @@ TEST_F(InductionVarAnalysisTest, ByteLoopControl1) { HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious(); ifs->ReplaceInput(graph_->GetIntConstant(127), 1); HInstruction* conv = - new (&allocator_) HTypeConversion(DataType::Type::kInt8, increment_[0], kNoDexPc); + new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, increment_[0], kNoDexPc); loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext()); basic_[0]->ReplaceInput(conv, 1); PerformInductionVarAnalysis(); @@ -1247,7 +1243,7 @@ TEST_F(InductionVarAnalysisTest, ByteLoopControl2) { HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious(); ifs->ReplaceInput(graph_->GetIntConstant(128), 1); HInstruction* conv = - new (&allocator_) HTypeConversion(DataType::Type::kInt8, increment_[0], kNoDexPc); + new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, increment_[0], kNoDexPc); 
loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext()); basic_[0]->ReplaceInput(conv, 1); PerformInductionVarAnalysis(); @@ -1273,7 +1269,7 @@ TEST_F(InductionVarAnalysisTest, ShortLoopControl1) { HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious(); ifs->ReplaceInput(graph_->GetIntConstant(32767), 1); HInstruction* conv = - new (&allocator_) HTypeConversion(DataType::Type::kInt16, increment_[0], kNoDexPc); + new (GetAllocator()) HTypeConversion(DataType::Type::kInt16, increment_[0], kNoDexPc); loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext()); basic_[0]->ReplaceInput(conv, 1); PerformInductionVarAnalysis(); @@ -1299,7 +1295,7 @@ TEST_F(InductionVarAnalysisTest, ShortLoopControl2) { HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious(); ifs->ReplaceInput(graph_->GetIntConstant(32768), 1); HInstruction* conv = - new (&allocator_) HTypeConversion(DataType::Type::kInt16, increment_[0], kNoDexPc); + new (GetAllocator()) HTypeConversion(DataType::Type::kInt16, increment_[0], kNoDexPc); loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext()); basic_[0]->ReplaceInput(conv, 1); PerformInductionVarAnalysis(); @@ -1324,7 +1320,7 @@ TEST_F(InductionVarAnalysisTest, CharLoopControl1) { HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious(); ifs->ReplaceInput(graph_->GetIntConstant(65535), 1); HInstruction* conv = - new (&allocator_) HTypeConversion(DataType::Type::kUint16, increment_[0], kNoDexPc); + new (GetAllocator()) HTypeConversion(DataType::Type::kUint16, increment_[0], kNoDexPc); loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext()); basic_[0]->ReplaceInput(conv, 1); PerformInductionVarAnalysis(); @@ -1349,7 +1345,7 @@ TEST_F(InductionVarAnalysisTest, CharLoopControl2) { HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious(); ifs->ReplaceInput(graph_->GetIntConstant(65536), 1); HInstruction* conv = - new (&allocator_) 
HTypeConversion(DataType::Type::kUint16, increment_[0], kNoDexPc); + new (GetAllocator()) HTypeConversion(DataType::Type::kUint16, increment_[0], kNoDexPc); loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext()); basic_[0]->ReplaceInput(conv, 1); PerformInductionVarAnalysis(); diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc index ab6fbae248..99dec11240 100644 --- a/compiler/optimizing/induction_var_range.cc +++ b/compiler/optimizing/induction_var_range.cc @@ -418,7 +418,8 @@ HInstruction* InductionVarRange::GenerateTripCount(HLoopInformation* loop, if (GenerateCode(trip->op_a, nullptr, graph, block, &trip_expr, false, false)) { if (taken_test != nullptr) { HInstruction* zero = graph->GetConstant(trip->type, 0); - trip_expr = Insert(block, new (graph->GetArena()) HSelect(taken_test, trip_expr, zero, kNoDexPc)); + ArenaAllocator* allocator = graph->GetAllocator(); + trip_expr = Insert(block, new (allocator) HSelect(taken_test, trip_expr, zero, kNoDexPc)); } return trip_expr; } @@ -1059,7 +1060,7 @@ bool InductionVarRange::GenerateLastValuePolynomial(HInductionVarAnalysis::Induc sum = static_cast<int32_t>(sum); // okay to truncate } *result = - Insert(block, new (graph->GetArena()) HAdd(type, graph->GetConstant(type, sum), c)); + Insert(block, new (graph->GetAllocator()) HAdd(type, graph->GetConstant(type, sum), c)); } return true; } @@ -1104,12 +1105,13 @@ bool InductionVarRange::GenerateLastValueGeometric(HInductionVarAnalysis::Induct } else { // Last value: a * f ^ m + b or a * f ^ -m + b. 
HInstruction* e = nullptr; + ArenaAllocator* allocator = graph->GetAllocator(); if (info->operation == HInductionVarAnalysis::kMul) { - e = new (graph->GetArena()) HMul(type, opa, graph->GetConstant(type, fpow)); + e = new (allocator) HMul(type, opa, graph->GetConstant(type, fpow)); } else { - e = new (graph->GetArena()) HDiv(type, opa, graph->GetConstant(type, fpow), kNoDexPc); + e = new (allocator) HDiv(type, opa, graph->GetConstant(type, fpow), kNoDexPc); } - *result = Insert(block, new (graph->GetArena()) HAdd(type, Insert(block, e), opb)); + *result = Insert(block, new (allocator) HAdd(type, Insert(block, e), opb)); } } return true; @@ -1190,18 +1192,20 @@ bool InductionVarRange::GenerateLastValuePeriodic(HInductionVarAnalysis::Inducti // During actual code generation (graph != nullptr), generate is_even ? x : y. if (graph != nullptr) { DataType::Type type = trip->type; + ArenaAllocator* allocator = graph->GetAllocator(); HInstruction* msk = - Insert(block, new (graph->GetArena()) HAnd(type, t, graph->GetConstant(type, 1))); + Insert(block, new (allocator) HAnd(type, t, graph->GetConstant(type, 1))); HInstruction* is_even = - Insert(block, new (graph->GetArena()) HEqual(msk, graph->GetConstant(type, 0), kNoDexPc)); - *result = Insert(block, new (graph->GetArena()) HSelect(is_even, x, y, kNoDexPc)); + Insert(block, new (allocator) HEqual(msk, graph->GetConstant(type, 0), kNoDexPc)); + *result = Insert(block, new (graph->GetAllocator()) HSelect(is_even, x, y, kNoDexPc)); } // Guard select with taken test if needed. if (*needs_taken_test) { HInstruction* is_taken = nullptr; if (GenerateCode(trip->op_b, nullptr, graph, block, graph ? 
&is_taken : nullptr, false, false)) { if (graph != nullptr) { - *result = Insert(block, new (graph->GetArena()) HSelect(is_taken, *result, x, kNoDexPc)); + ArenaAllocator* allocator = graph->GetAllocator(); + *result = Insert(block, new (allocator) HSelect(is_taken, *result, x, kNoDexPc)); } *needs_taken_test = false; // taken care of } else { @@ -1250,25 +1254,25 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info, HInstruction* operation = nullptr; switch (info->operation) { case HInductionVarAnalysis::kAdd: - operation = new (graph->GetArena()) HAdd(type, opa, opb); break; + operation = new (graph->GetAllocator()) HAdd(type, opa, opb); break; case HInductionVarAnalysis::kSub: - operation = new (graph->GetArena()) HSub(type, opa, opb); break; + operation = new (graph->GetAllocator()) HSub(type, opa, opb); break; case HInductionVarAnalysis::kMul: - operation = new (graph->GetArena()) HMul(type, opa, opb, kNoDexPc); break; + operation = new (graph->GetAllocator()) HMul(type, opa, opb, kNoDexPc); break; case HInductionVarAnalysis::kDiv: - operation = new (graph->GetArena()) HDiv(type, opa, opb, kNoDexPc); break; + operation = new (graph->GetAllocator()) HDiv(type, opa, opb, kNoDexPc); break; case HInductionVarAnalysis::kRem: - operation = new (graph->GetArena()) HRem(type, opa, opb, kNoDexPc); break; + operation = new (graph->GetAllocator()) HRem(type, opa, opb, kNoDexPc); break; case HInductionVarAnalysis::kXor: - operation = new (graph->GetArena()) HXor(type, opa, opb); break; + operation = new (graph->GetAllocator()) HXor(type, opa, opb); break; case HInductionVarAnalysis::kLT: - operation = new (graph->GetArena()) HLessThan(opa, opb); break; + operation = new (graph->GetAllocator()) HLessThan(opa, opb); break; case HInductionVarAnalysis::kLE: - operation = new (graph->GetArena()) HLessThanOrEqual(opa, opb); break; + operation = new (graph->GetAllocator()) HLessThanOrEqual(opa, opb); break; case HInductionVarAnalysis::kGT: - 
operation = new (graph->GetArena()) HGreaterThan(opa, opb); break; + operation = new (graph->GetAllocator()) HGreaterThan(opa, opb); break; case HInductionVarAnalysis::kGE: - operation = new (graph->GetArena()) HGreaterThanOrEqual(opa, opb); break; + operation = new (graph->GetAllocator()) HGreaterThanOrEqual(opa, opb); break; default: LOG(FATAL) << "unknown operation"; } @@ -1280,7 +1284,7 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info, case HInductionVarAnalysis::kNeg: if (GenerateCode(info->op_b, trip, graph, block, &opb, in_body, !is_min)) { if (graph != nullptr) { - *result = Insert(block, new (graph->GetArena()) HNeg(type, opb)); + *result = Insert(block, new (graph->GetAllocator()) HNeg(type, opb)); } return true; } @@ -1306,9 +1310,9 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info, } else if (in_body) { if (GenerateCode(info->op_a, trip, graph, block, &opb, in_body, is_min)) { if (graph != nullptr) { + ArenaAllocator* allocator = graph->GetAllocator(); *result = - Insert(block, - new (graph->GetArena()) HSub(type, opb, graph->GetConstant(type, 1))); + Insert(block, new (allocator) HSub(type, opb, graph->GetConstant(type, 1))); } return true; } @@ -1333,15 +1337,16 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info, if (GenerateCode(trip, trip, graph, block, &opa, in_body, is_min_a) && GenerateCode(info->op_b, trip, graph, block, &opb, in_body, is_min)) { if (graph != nullptr) { + ArenaAllocator* allocator = graph->GetAllocator(); HInstruction* oper; if (stride_value == 1) { - oper = new (graph->GetArena()) HAdd(type, opa, opb); + oper = new (allocator) HAdd(type, opa, opb); } else if (stride_value == -1) { - oper = new (graph->GetArena()) HSub(type, opb, opa); + oper = new (graph->GetAllocator()) HSub(type, opb, opa); } else { HInstruction* mul = - new (graph->GetArena()) HMul(type, graph->GetConstant(type, stride_value), opa); - oper = new 
(graph->GetArena()) HAdd(type, Insert(block, mul), opb); + new (allocator) HMul(type, graph->GetConstant(type, stride_value), opa); + oper = new (allocator) HAdd(type, Insert(block, mul), opb); } *result = Insert(block, oper); } diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc index 1c8426954b..e5bc6ef22c 100644 --- a/compiler/optimizing/induction_var_range_test.cc +++ b/compiler/optimizing/induction_var_range_test.cc @@ -29,13 +29,11 @@ using Value = InductionVarRange::Value; /** * Fixture class for the InductionVarRange tests. */ -class InductionVarRangeTest : public CommonCompilerTest { +class InductionVarRangeTest : public OptimizingUnitTest { public: InductionVarRangeTest() - : pool_(), - allocator_(&pool_), - graph_(CreateGraph(&allocator_)), - iva_(new (&allocator_) HInductionVarAnalysis(graph_)), + : graph_(CreateGraph()), + iva_(new (GetAllocator()) HInductionVarAnalysis(graph_)), range_(iva_) { BuildGraph(); } @@ -61,22 +59,22 @@ class InductionVarRangeTest : public CommonCompilerTest { /** Constructs bare minimum graph. */ void BuildGraph() { graph_->SetNumberOfVRegs(1); - entry_block_ = new (&allocator_) HBasicBlock(graph_); - exit_block_ = new (&allocator_) HBasicBlock(graph_); + entry_block_ = new (GetAllocator()) HBasicBlock(graph_); + exit_block_ = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(entry_block_); graph_->AddBlock(exit_block_); graph_->SetEntryBlock(entry_block_); graph_->SetExitBlock(exit_block_); // Two parameters. 
- x_ = new (&allocator_) HParameterValue(graph_->GetDexFile(), - dex::TypeIndex(0), - 0, - DataType::Type::kInt32); + x_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(), + dex::TypeIndex(0), + 0, + DataType::Type::kInt32); entry_block_->AddInstruction(x_); - y_ = new (&allocator_) HParameterValue(graph_->GetDexFile(), - dex::TypeIndex(0), - 0, - DataType::Type::kInt32); + y_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(), + dex::TypeIndex(0), + 0, + DataType::Type::kInt32); entry_block_->AddInstruction(y_); // Set arbitrary range analysis hint while testing private methods. SetHint(x_); @@ -85,13 +83,13 @@ class InductionVarRangeTest : public CommonCompilerTest { /** Constructs loop with given upper bound. */ void BuildLoop(int32_t lower, HInstruction* upper, int32_t stride) { // Control flow. - loop_preheader_ = new (&allocator_) HBasicBlock(graph_); + loop_preheader_ = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(loop_preheader_); - loop_header_ = new (&allocator_) HBasicBlock(graph_); + loop_header_ = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(loop_header_); - loop_body_ = new (&allocator_) HBasicBlock(graph_); + loop_body_ = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(loop_body_); - HBasicBlock* return_block = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* return_block = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(return_block); entry_block_->AddSuccessor(loop_preheader_); loop_preheader_->AddSuccessor(loop_header_); @@ -100,24 +98,24 @@ class InductionVarRangeTest : public CommonCompilerTest { loop_body_->AddSuccessor(loop_header_); return_block->AddSuccessor(exit_block_); // Instructions. 
- loop_preheader_->AddInstruction(new (&allocator_) HGoto()); - HPhi* phi = new (&allocator_) HPhi(&allocator_, 0, 0, DataType::Type::kInt32); + loop_preheader_->AddInstruction(new (GetAllocator()) HGoto()); + HPhi* phi = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32); loop_header_->AddPhi(phi); phi->AddInput(graph_->GetIntConstant(lower)); // i = l if (stride > 0) { - condition_ = new (&allocator_) HLessThan(phi, upper); // i < u + condition_ = new (GetAllocator()) HLessThan(phi, upper); // i < u } else { - condition_ = new (&allocator_) HGreaterThan(phi, upper); // i > u + condition_ = new (GetAllocator()) HGreaterThan(phi, upper); // i > u } loop_header_->AddInstruction(condition_); - loop_header_->AddInstruction(new (&allocator_) HIf(condition_)); + loop_header_->AddInstruction(new (GetAllocator()) HIf(condition_)); increment_ = - new (&allocator_) HAdd(DataType::Type::kInt32, phi, graph_->GetIntConstant(stride)); + new (GetAllocator()) HAdd(DataType::Type::kInt32, phi, graph_->GetIntConstant(stride)); loop_body_->AddInstruction(increment_); // i += s phi->AddInput(increment_); - loop_body_->AddInstruction(new (&allocator_) HGoto()); - return_block->AddInstruction(new (&allocator_) HReturnVoid()); - exit_block_->AddInstruction(new (&allocator_) HExit()); + loop_body_->AddInstruction(new (GetAllocator()) HGoto()); + return_block->AddInstruction(new (GetAllocator()) HReturnVoid()); + exit_block_->AddInstruction(new (GetAllocator()) HExit()); } /** Constructs SSA and performs induction variable analysis. */ @@ -304,8 +302,6 @@ class InductionVarRangeTest : public CommonCompilerTest { Value MaxValue(Value v1, Value v2) { return range_.MergeVal(v1, v2, false); } // General building fields. 
- ArenaPool pool_; - ArenaAllocator allocator_; HGraph* graph_; HBasicBlock* entry_block_; HBasicBlock* exit_block_; @@ -705,9 +701,9 @@ TEST_F(InductionVarRangeTest, MaxValue) { TEST_F(InductionVarRangeTest, ArrayLengthAndHints) { // We pass a bogus constant for the class to avoid mocking one. - HInstruction* new_array = new (&allocator_) HNewArray(x_, x_, 0); + HInstruction* new_array = new (GetAllocator()) HNewArray(x_, x_, 0); entry_block_->AddInstruction(new_array); - HInstruction* array_length = new (&allocator_) HArrayLength(new_array, 0); + HInstruction* array_length = new (GetAllocator()) HArrayLength(new_array, 0); entry_block_->AddInstruction(array_length); // With null hint: yields extreme constants. const int32_t max_value = std::numeric_limits<int32_t>::max(); @@ -725,13 +721,13 @@ TEST_F(InductionVarRangeTest, ArrayLengthAndHints) { } TEST_F(InductionVarRangeTest, AddOrSubAndConstant) { - HInstruction* add = new (&allocator_) + HInstruction* add = new (GetAllocator()) HAdd(DataType::Type::kInt32, x_, graph_->GetIntConstant(-1)); - HInstruction* alt = new (&allocator_) + HInstruction* alt = new (GetAllocator()) HAdd(DataType::Type::kInt32, graph_->GetIntConstant(-1), x_); - HInstruction* sub = new (&allocator_) + HInstruction* sub = new (GetAllocator()) HSub(DataType::Type::kInt32, x_, graph_->GetIntConstant(1)); - HInstruction* rev = new (&allocator_) + HInstruction* rev = new (GetAllocator()) HSub(DataType::Type::kInt32, graph_->GetIntConstant(1), x_); entry_block_->AddInstruction(add); entry_block_->AddInstruction(alt); diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc index 90e3d2ade7..4d846fa4ed 100644 --- a/compiler/optimizing/inliner.cc +++ b/compiler/optimizing/inliner.cc @@ -705,7 +705,7 @@ HInstanceFieldGet* HInliner::BuildGetReceiverClass(ClassLinker* class_linker, uint32_t dex_pc) const { ArtField* field = class_linker->GetClassRoot(ClassLinker::kJavaLangObject)->GetInstanceField(0); 
DCHECK_EQ(std::string(field->GetName()), "shadow$_klass_"); - HInstanceFieldGet* result = new (graph_->GetArena()) HInstanceFieldGet( + HInstanceFieldGet* result = new (graph_->GetAllocator()) HInstanceFieldGet( receiver, field, DataType::Type::kReference, @@ -812,12 +812,12 @@ void HInliner::AddCHAGuard(HInstruction* invoke_instruction, uint32_t dex_pc, HInstruction* cursor, HBasicBlock* bb_cursor) { - HShouldDeoptimizeFlag* deopt_flag = new (graph_->GetArena()) - HShouldDeoptimizeFlag(graph_->GetArena(), dex_pc); - HInstruction* compare = new (graph_->GetArena()) HNotEqual( + HShouldDeoptimizeFlag* deopt_flag = new (graph_->GetAllocator()) + HShouldDeoptimizeFlag(graph_->GetAllocator(), dex_pc); + HInstruction* compare = new (graph_->GetAllocator()) HNotEqual( deopt_flag, graph_->GetIntConstant(0, dex_pc)); - HInstruction* deopt = new (graph_->GetArena()) HDeoptimize( - graph_->GetArena(), compare, DeoptimizationKind::kCHA, dex_pc); + HInstruction* deopt = new (graph_->GetAllocator()) HDeoptimize( + graph_->GetAllocator(), compare, DeoptimizationKind::kCHA, dex_pc); if (cursor != nullptr) { bb_cursor->InsertInstructionAfter(deopt_flag, cursor); @@ -865,13 +865,13 @@ HInstruction* HInliner::AddTypeGuard(HInstruction* receiver, // Note that we will just compare the classes, so we don't need Java semantics access checks. // Note that the type index and the dex file are relative to the method this type guard is // inlined into. 
- HLoadClass* load_class = new (graph_->GetArena()) HLoadClass(graph_->GetCurrentMethod(), - class_index, - caller_dex_file, - klass, - is_referrer, - invoke_instruction->GetDexPc(), - /* needs_access_check */ false); + HLoadClass* load_class = new (graph_->GetAllocator()) HLoadClass(graph_->GetCurrentMethod(), + class_index, + caller_dex_file, + klass, + is_referrer, + invoke_instruction->GetDexPc(), + /* needs_access_check */ false); HLoadClass::LoadKind kind = HSharpening::ComputeLoadClassKind( load_class, codegen_, compiler_driver_, caller_compilation_unit_); DCHECK(kind != HLoadClass::LoadKind::kInvalid) @@ -887,11 +887,11 @@ HInstruction* HInliner::AddTypeGuard(HInstruction* receiver, load_class->CopyEnvironmentFrom(invoke_instruction->GetEnvironment()); } - HNotEqual* compare = new (graph_->GetArena()) HNotEqual(load_class, receiver_class); + HNotEqual* compare = new (graph_->GetAllocator()) HNotEqual(load_class, receiver_class); bb_cursor->InsertInstructionAfter(compare, load_class); if (with_deoptimization) { - HDeoptimize* deoptimize = new (graph_->GetArena()) HDeoptimize( - graph_->GetArena(), + HDeoptimize* deoptimize = new (graph_->GetAllocator()) HDeoptimize( + graph_->GetAllocator(), compare, receiver, Runtime::Current()->IsAotCompiler() @@ -1012,7 +1012,7 @@ void HInliner::CreateDiamondPatternForPolymorphicInline(HInstruction* compare, uint32_t dex_pc = invoke_instruction->GetDexPc(); HBasicBlock* cursor_block = compare->GetBlock(); HBasicBlock* original_invoke_block = invoke_instruction->GetBlock(); - ArenaAllocator* allocator = graph_->GetArena(); + ArenaAllocator* allocator = graph_->GetAllocator(); // Spit the block after the compare: `cursor_block` will now be the start of the diamond, // and the returned block is the start of the then branch (that could contain multiple blocks). @@ -1147,7 +1147,7 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget( DataType::Type type = Is64BitInstructionSet(graph_->GetInstructionSet()) ? 
DataType::Type::kInt64 : DataType::Type::kInt32; - HClassTableGet* class_table_get = new (graph_->GetArena()) HClassTableGet( + HClassTableGet* class_table_get = new (graph_->GetAllocator()) HClassTableGet( receiver_class, type, invoke_instruction->IsInvokeVirtual() ? HClassTableGet::TableKind::kVTable @@ -1164,7 +1164,7 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget( reinterpret_cast<intptr_t>(actual_method), invoke_instruction->GetDexPc()); } - HNotEqual* compare = new (graph_->GetArena()) HNotEqual(class_table_get, constant); + HNotEqual* compare = new (graph_->GetAllocator()) HNotEqual(class_table_get, constant); if (cursor != nullptr) { bb_cursor->InsertInstructionAfter(receiver_class, cursor); } else { @@ -1176,8 +1176,8 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget( if (outermost_graph_->IsCompilingOsr()) { CreateDiamondPatternForPolymorphicInline(compare, return_replacement, invoke_instruction); } else { - HDeoptimize* deoptimize = new (graph_->GetArena()) HDeoptimize( - graph_->GetArena(), + HDeoptimize* deoptimize = new (graph_->GetAllocator()) HDeoptimize( + graph_->GetAllocator(), compare, receiver, DeoptimizationKind::kJitSameTarget, @@ -1240,8 +1240,8 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction, if (dex_method_index == dex::kDexNoIndex) { return false; } - HInvokeVirtual* new_invoke = new (graph_->GetArena()) HInvokeVirtual( - graph_->GetArena(), + HInvokeVirtual* new_invoke = new (graph_->GetAllocator()) HInvokeVirtual( + graph_->GetAllocator(), invoke_instruction->GetNumberOfArguments(), invoke_instruction->GetType(), invoke_instruction->GetDexPc(), @@ -1517,7 +1517,7 @@ bool HInliner::TryPatternSubstitution(HInvoke* invoke_instruction, DCHECK(obj != nullptr) << "only non-static methods can have a constructor fence"; HConstructorFence* constructor_fence = - new (graph_->GetArena()) HConstructorFence(obj, kNoDexPc, graph_->GetArena()); + new (graph_->GetAllocator()) HConstructorFence(obj, kNoDexPc, 
graph_->GetAllocator()); invoke_instruction->GetBlock()->InsertInstructionBefore(constructor_fence, invoke_instruction); } @@ -1539,7 +1539,7 @@ HInstanceFieldGet* HInliner::CreateInstanceFieldGet(uint32_t field_index, ArtField* resolved_field = class_linker->LookupResolvedField(field_index, referrer, /* is_static */ false); DCHECK(resolved_field != nullptr); - HInstanceFieldGet* iget = new (graph_->GetArena()) HInstanceFieldGet( + HInstanceFieldGet* iget = new (graph_->GetAllocator()) HInstanceFieldGet( obj, resolved_field, DataType::FromShorty(resolved_field->GetTypeDescriptor()[0]), @@ -1579,7 +1579,7 @@ HInstanceFieldSet* HInliner::CreateInstanceFieldSet(uint32_t field_index, DCHECK(referrer->IsConstructor()); *is_final = resolved_field->IsFinal(); } - HInstanceFieldSet* iput = new (graph_->GetArena()) HInstanceFieldSet( + HInstanceFieldSet* iput = new (graph_->GetAllocator()) HInstanceFieldSet( obj, value, resolved_field, @@ -1641,8 +1641,9 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction, } const int32_t caller_instruction_counter = graph_->GetCurrentInstructionId(); - HGraph* callee_graph = new (graph_->GetArena()) HGraph( - graph_->GetArena(), + HGraph* callee_graph = new (graph_->GetAllocator()) HGraph( + graph_->GetAllocator(), + graph_->GetArenaStack(), callee_dex_file, method_index, compiler_driver_->GetInstructionSet(), @@ -1659,7 +1660,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction, if (stats_ != nullptr) { // Reuse one object for all inline attempts from this caller to keep Arena memory usage low. 
if (inline_stats_ == nullptr) { - void* storage = graph_->GetArena()->Alloc<OptimizingCompilerStats>(kArenaAllocMisc); + void* storage = graph_->GetAllocator()->Alloc<OptimizingCompilerStats>(kArenaAllocMisc); inline_stats_ = new (storage) OptimizingCompilerStats; } else { inline_stats_->Reset(); @@ -1672,7 +1673,6 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction, codegen_, inline_stats_, resolved_method->GetQuickenedInfo(class_linker->GetImagePointerSize()), - dex_cache, handles_); if (builder.BuildGraph() != kAnalysisSuccess) { diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc index 6ad8036870..b06d91c823 100644 --- a/compiler/optimizing/instruction_builder.cc +++ b/compiler/optimizing/instruction_builder.cc @@ -59,8 +59,8 @@ ArenaVector<HInstruction*>* HInstructionBuilder::GetLocalsForWithAllocation( // the first throwing instruction. HInstruction* current_local_value = (*current_locals_)[i]; if (current_local_value != nullptr) { - HPhi* phi = new (arena_) HPhi( - arena_, + HPhi* phi = new (allocator_) HPhi( + allocator_, i, 0, current_local_value->GetType()); @@ -109,8 +109,8 @@ void HInstructionBuilder::InitializeBlockLocals() { HInstruction* incoming = ValueOfLocalAt(current_block_->GetLoopInformation()->GetPreHeader(), local); if (incoming != nullptr) { - HPhi* phi = new (arena_) HPhi( - arena_, + HPhi* phi = new (allocator_) HPhi( + allocator_, local, 0, incoming->GetType()); @@ -148,8 +148,8 @@ void HInstructionBuilder::InitializeBlockLocals() { if (is_different) { HInstruction* first_input = ValueOfLocalAt(current_block_->GetPredecessors()[0], local); - HPhi* phi = new (arena_) HPhi( - arena_, + HPhi* phi = new (allocator_) HPhi( + allocator_, local, current_block_->GetPredecessors().size(), first_input->GetType()); @@ -210,8 +210,8 @@ void HInstructionBuilder::InsertInstructionAtTop(HInstruction* instruction) { void HInstructionBuilder::InitializeInstruction(HInstruction* 
instruction) { if (instruction->NeedsEnvironment()) { - HEnvironment* environment = new (arena_) HEnvironment( - arena_, + HEnvironment* environment = new (allocator_) HEnvironment( + allocator_, current_locals_->size(), graph_->GetArtMethod(), instruction->GetDexPc(), @@ -227,7 +227,7 @@ HInstruction* HInstructionBuilder::LoadNullCheckedLocal(uint32_t register_index, return ref; } - HNullCheck* null_check = new (arena_) HNullCheck(ref, dex_pc); + HNullCheck* null_check = new (allocator_) HNullCheck(ref, dex_pc); AppendInstruction(null_check); return null_check; } @@ -265,7 +265,7 @@ static bool IsBlockPopulated(HBasicBlock* block) { bool HInstructionBuilder::Build() { locals_for_.resize(graph_->GetBlocks().size(), - ArenaVector<HInstruction*>(arena_->Adapter(kArenaAllocGraphBuilder))); + ArenaVector<HInstruction*>(allocator_->Adapter(kArenaAllocGraphBuilder))); // Find locations where we want to generate extra stackmaps for native debugging. // This allows us to generate the info only at interesting points (for example, @@ -275,7 +275,8 @@ bool HInstructionBuilder::Build() { ArenaBitVector* native_debug_info_locations = nullptr; if (native_debuggable) { const uint32_t num_instructions = code_item_.insns_size_in_code_units_; - native_debug_info_locations = new (arena_) ArenaBitVector (arena_, num_instructions, false); + native_debug_info_locations = + new (allocator_) ArenaBitVector (allocator_, num_instructions, false); FindNativeDebugInfoLocations(native_debug_info_locations); } @@ -287,14 +288,14 @@ bool HInstructionBuilder::Build() { if (current_block_->IsEntryBlock()) { InitializeParameters(); - AppendInstruction(new (arena_) HSuspendCheck(0u)); - AppendInstruction(new (arena_) HGoto(0u)); + AppendInstruction(new (allocator_) HSuspendCheck(0u)); + AppendInstruction(new (allocator_) HGoto(0u)); continue; } else if (current_block_->IsExitBlock()) { - AppendInstruction(new (arena_) HExit()); + AppendInstruction(new (allocator_) HExit()); continue; } else if 
(current_block_->IsLoopHeader()) { - HSuspendCheck* suspend_check = new (arena_) HSuspendCheck(current_block_->GetDexPc()); + HSuspendCheck* suspend_check = new (allocator_) HSuspendCheck(current_block_->GetDexPc()); current_block_->GetLoopInformation()->SetSuspendCheck(suspend_check); // This is slightly odd because the loop header might not be empty (TryBoundary). // But we're still creating the environment with locals from the top of the block. @@ -331,7 +332,7 @@ bool HInstructionBuilder::Build() { } if (native_debuggable && native_debug_info_locations->IsBitSet(dex_pc)) { - AppendInstruction(new (arena_) HNativeDebugInfo(dex_pc)); + AppendInstruction(new (allocator_) HNativeDebugInfo(dex_pc)); } if (!ProcessDexInstruction(it.CurrentInstruction(), dex_pc, quicken_index)) { @@ -348,7 +349,7 @@ bool HInstructionBuilder::Build() { // instruction of the current block is not a branching instruction. // We add an unconditional Goto to the next block. DCHECK_EQ(current_block_->GetSuccessors().size(), 1u); - AppendInstruction(new (arena_) HGoto()); + AppendInstruction(new (allocator_) HGoto()); } } @@ -452,7 +453,7 @@ void HInstructionBuilder::InitializeParameters() { dex_file_->GetMethodId(dex_compilation_unit_->GetDexMethodIndex()); if (!dex_compilation_unit_->IsStatic()) { // Add the implicit 'this' argument, not expressed in the signature. 
- HParameterValue* parameter = new (arena_) HParameterValue(*dex_file_, + HParameterValue* parameter = new (allocator_) HParameterValue(*dex_file_, referrer_method_id.class_idx_, parameter_index++, DataType::Type::kReference, @@ -468,7 +469,7 @@ void HInstructionBuilder::InitializeParameters() { const DexFile::ProtoId& proto = dex_file_->GetMethodPrototype(referrer_method_id); const DexFile::TypeList* arg_types = dex_file_->GetProtoParameters(proto); for (int i = 0, shorty_pos = 1; i < number_of_parameters; i++) { - HParameterValue* parameter = new (arena_) HParameterValue( + HParameterValue* parameter = new (allocator_) HParameterValue( *dex_file_, arg_types->GetTypeItem(shorty_pos - 1).type_idx_, parameter_index++, @@ -491,18 +492,18 @@ template<typename T> void HInstructionBuilder::If_22t(const Instruction& instruction, uint32_t dex_pc) { HInstruction* first = LoadLocal(instruction.VRegA(), DataType::Type::kInt32); HInstruction* second = LoadLocal(instruction.VRegB(), DataType::Type::kInt32); - T* comparison = new (arena_) T(first, second, dex_pc); + T* comparison = new (allocator_) T(first, second, dex_pc); AppendInstruction(comparison); - AppendInstruction(new (arena_) HIf(comparison, dex_pc)); + AppendInstruction(new (allocator_) HIf(comparison, dex_pc)); current_block_ = nullptr; } template<typename T> void HInstructionBuilder::If_21t(const Instruction& instruction, uint32_t dex_pc) { HInstruction* value = LoadLocal(instruction.VRegA(), DataType::Type::kInt32); - T* comparison = new (arena_) T(value, graph_->GetIntConstant(0, dex_pc), dex_pc); + T* comparison = new (allocator_) T(value, graph_->GetIntConstant(0, dex_pc), dex_pc); AppendInstruction(comparison); - AppendInstruction(new (arena_) HIf(comparison, dex_pc)); + AppendInstruction(new (allocator_) HIf(comparison, dex_pc)); current_block_ = nullptr; } @@ -511,7 +512,7 @@ void HInstructionBuilder::Unop_12x(const Instruction& instruction, DataType::Type type, uint32_t dex_pc) { HInstruction* first = 
LoadLocal(instruction.VRegB(), type); - AppendInstruction(new (arena_) T(type, first, dex_pc)); + AppendInstruction(new (allocator_) T(type, first, dex_pc)); UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction()); } @@ -520,7 +521,7 @@ void HInstructionBuilder::Conversion_12x(const Instruction& instruction, DataType::Type result_type, uint32_t dex_pc) { HInstruction* first = LoadLocal(instruction.VRegB(), input_type); - AppendInstruction(new (arena_) HTypeConversion(result_type, first, dex_pc)); + AppendInstruction(new (allocator_) HTypeConversion(result_type, first, dex_pc)); UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction()); } @@ -530,7 +531,7 @@ void HInstructionBuilder::Binop_23x(const Instruction& instruction, uint32_t dex_pc) { HInstruction* first = LoadLocal(instruction.VRegB(), type); HInstruction* second = LoadLocal(instruction.VRegC(), type); - AppendInstruction(new (arena_) T(type, first, second, dex_pc)); + AppendInstruction(new (allocator_) T(type, first, second, dex_pc)); UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction()); } @@ -540,7 +541,7 @@ void HInstructionBuilder::Binop_23x_shift(const Instruction& instruction, uint32_t dex_pc) { HInstruction* first = LoadLocal(instruction.VRegB(), type); HInstruction* second = LoadLocal(instruction.VRegC(), DataType::Type::kInt32); - AppendInstruction(new (arena_) T(type, first, second, dex_pc)); + AppendInstruction(new (allocator_) T(type, first, second, dex_pc)); UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction()); } @@ -550,7 +551,7 @@ void HInstructionBuilder::Binop_23x_cmp(const Instruction& instruction, uint32_t dex_pc) { HInstruction* first = LoadLocal(instruction.VRegB(), type); HInstruction* second = LoadLocal(instruction.VRegC(), type); - AppendInstruction(new (arena_) HCompare(type, first, second, bias, dex_pc)); + AppendInstruction(new (allocator_) HCompare(type, first, second, bias, dex_pc)); 
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction()); } @@ -560,7 +561,7 @@ void HInstructionBuilder::Binop_12x_shift(const Instruction& instruction, uint32_t dex_pc) { HInstruction* first = LoadLocal(instruction.VRegA(), type); HInstruction* second = LoadLocal(instruction.VRegB(), DataType::Type::kInt32); - AppendInstruction(new (arena_) T(type, first, second, dex_pc)); + AppendInstruction(new (allocator_) T(type, first, second, dex_pc)); UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction()); } @@ -570,7 +571,7 @@ void HInstructionBuilder::Binop_12x(const Instruction& instruction, uint32_t dex_pc) { HInstruction* first = LoadLocal(instruction.VRegA(), type); HInstruction* second = LoadLocal(instruction.VRegB(), type); - AppendInstruction(new (arena_) T(type, first, second, dex_pc)); + AppendInstruction(new (allocator_) T(type, first, second, dex_pc)); UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction()); } @@ -581,7 +582,7 @@ void HInstructionBuilder::Binop_22s(const Instruction& instruction, bool reverse if (reverse) { std::swap(first, second); } - AppendInstruction(new (arena_) T(DataType::Type::kInt32, first, second, dex_pc)); + AppendInstruction(new (allocator_) T(DataType::Type::kInt32, first, second, dex_pc)); UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction()); } @@ -592,7 +593,7 @@ void HInstructionBuilder::Binop_22b(const Instruction& instruction, bool reverse if (reverse) { std::swap(first, second); } - AppendInstruction(new (arena_) T(DataType::Type::kInt32, first, second, dex_pc)); + AppendInstruction(new (allocator_) T(DataType::Type::kInt32, first, second, dex_pc)); UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction()); } @@ -630,13 +631,13 @@ void HInstructionBuilder::BuildSwitch(const Instruction& instruction, uint32_t d if (table.GetNumEntries() == 0) { // Empty Switch. Code falls through to the next block. 
DCHECK(IsFallthroughInstruction(instruction, dex_pc, current_block_)); - AppendInstruction(new (arena_) HGoto(dex_pc)); + AppendInstruction(new (allocator_) HGoto(dex_pc)); } else if (table.ShouldBuildDecisionTree()) { for (DexSwitchTableIterator it(table); !it.Done(); it.Advance()) { HInstruction* case_value = graph_->GetIntConstant(it.CurrentKey(), dex_pc); - HEqual* comparison = new (arena_) HEqual(value, case_value, dex_pc); + HEqual* comparison = new (allocator_) HEqual(value, case_value, dex_pc); AppendInstruction(comparison); - AppendInstruction(new (arena_) HIf(comparison, dex_pc)); + AppendInstruction(new (allocator_) HIf(comparison, dex_pc)); if (!it.IsLast()) { current_block_ = FindBlockStartingAt(it.GetDexPcForCurrentIndex()); @@ -644,7 +645,7 @@ void HInstructionBuilder::BuildSwitch(const Instruction& instruction, uint32_t d } } else { AppendInstruction( - new (arena_) HPackedSwitch(table.GetEntryAt(0), table.GetNumEntries(), value, dex_pc)); + new (allocator_) HPackedSwitch(table.GetEntryAt(0), table.GetNumEntries(), value, dex_pc)); } current_block_ = nullptr; @@ -664,16 +665,16 @@ void HInstructionBuilder::BuildReturn(const Instruction& instruction, HInstruction* fence_target = current_this_parameter_; DCHECK(fence_target != nullptr); - AppendInstruction(new (arena_) HConstructorFence(fence_target, dex_pc, arena_)); + AppendInstruction(new (allocator_) HConstructorFence(fence_target, dex_pc, allocator_)); MaybeRecordStat( compilation_stats_, MethodCompilationStat::kConstructorFenceGeneratedFinal); } - AppendInstruction(new (arena_) HReturnVoid(dex_pc)); + AppendInstruction(new (allocator_) HReturnVoid(dex_pc)); } else { DCHECK(!RequiresConstructorBarrier(dex_compilation_unit_, compiler_driver_)); HInstruction* value = LoadLocal(instruction.VRegA(), type); - AppendInstruction(new (arena_) HReturn(value, dex_pc)); + AppendInstruction(new (allocator_) HReturn(value, dex_pc)); } current_block_ = nullptr; } @@ -816,12 +817,12 @@ bool 
HInstructionBuilder::BuildInvoke(const Instruction& instruction, if (UNLIKELY(resolved_method == nullptr)) { MaybeRecordStat(compilation_stats_, MethodCompilationStat::kUnresolvedMethod); - HInvoke* invoke = new (arena_) HInvokeUnresolved(arena_, - number_of_arguments, - return_type, - dex_pc, - method_idx, - invoke_type); + HInvoke* invoke = new (allocator_) HInvokeUnresolved(allocator_, + number_of_arguments, + return_type, + dex_pc, + method_idx, + invoke_type); return HandleInvoke(invoke, number_of_vreg_arguments, args, @@ -841,8 +842,8 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction, dchecked_integral_cast<uint64_t>(string_init_entry_point) }; MethodReference target_method(dex_file_, method_idx); - HInvoke* invoke = new (arena_) HInvokeStaticOrDirect( - arena_, + HInvoke* invoke = new (allocator_) HInvokeStaticOrDirect( + allocator_, number_of_arguments - 1, DataType::Type::kReference /*return_type */, dex_pc, @@ -887,35 +888,35 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction, }; MethodReference target_method(resolved_method->GetDexFile(), resolved_method->GetDexMethodIndex()); - invoke = new (arena_) HInvokeStaticOrDirect(arena_, - number_of_arguments, - return_type, - dex_pc, - method_idx, - resolved_method, - dispatch_info, - invoke_type, - target_method, - clinit_check_requirement); + invoke = new (allocator_) HInvokeStaticOrDirect(allocator_, + number_of_arguments, + return_type, + dex_pc, + method_idx, + resolved_method, + dispatch_info, + invoke_type, + target_method, + clinit_check_requirement); } else if (invoke_type == kVirtual) { ScopedObjectAccess soa(Thread::Current()); // Needed for the method index - invoke = new (arena_) HInvokeVirtual(arena_, - number_of_arguments, - return_type, - dex_pc, - method_idx, - resolved_method, - resolved_method->GetMethodIndex()); + invoke = new (allocator_) HInvokeVirtual(allocator_, + number_of_arguments, + return_type, + dex_pc, + method_idx, + resolved_method, + 
resolved_method->GetMethodIndex()); } else { DCHECK_EQ(invoke_type, kInterface); ScopedObjectAccess soa(Thread::Current()); // Needed for the IMT index. - invoke = new (arena_) HInvokeInterface(arena_, - number_of_arguments, - return_type, - dex_pc, - method_idx, - resolved_method, - ImTable::GetImtIndex(resolved_method)); + invoke = new (allocator_) HInvokeInterface(allocator_, + number_of_arguments, + return_type, + dex_pc, + method_idx, + resolved_method, + ImTable::GetImtIndex(resolved_method)); } return HandleInvoke(invoke, @@ -940,11 +941,11 @@ bool HInstructionBuilder::BuildInvokePolymorphic(const Instruction& instruction DCHECK_EQ(1 + ArtMethod::NumArgRegisters(descriptor), number_of_vreg_arguments); DataType::Type return_type = DataType::FromShorty(descriptor[0]); size_t number_of_arguments = strlen(descriptor); - HInvoke* invoke = new (arena_) HInvokePolymorphic(arena_, - number_of_arguments, - return_type, - dex_pc, - method_idx); + HInvoke* invoke = new (allocator_) HInvokePolymorphic(allocator_, + number_of_arguments, + return_type, + dex_pc, + method_idx); return HandleInvoke(invoke, number_of_vreg_arguments, args, @@ -964,7 +965,7 @@ HNewInstance* HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, u Handle<mirror::Class> klass = load_class->GetClass(); if (!IsInitialized(klass)) { - cls = new (arena_) HClinitCheck(load_class, dex_pc); + cls = new (allocator_) HClinitCheck(load_class, dex_pc); AppendInstruction(cls); } @@ -979,7 +980,7 @@ HNewInstance* HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, u // Consider classes we haven't resolved as potentially finalizable. 
bool finalizable = (klass == nullptr) || klass->IsFinalizable(); - HNewInstance* new_instance = new (arena_) HNewInstance( + HNewInstance* new_instance = new (allocator_) HNewInstance( cls, dex_pc, type_index, @@ -1036,7 +1037,7 @@ void HInstructionBuilder::BuildConstructorFenceForAllocation(HInstruction* alloc // (and in theory the 0-initializing, but that happens automatically // when new memory pages are mapped in by the OS). HConstructorFence* ctor_fence = - new (arena_) HConstructorFence(allocation, allocation->GetDexPc(), arena_); + new (allocator_) HConstructorFence(allocation, allocation->GetDexPc(), allocator_); AppendInstruction(ctor_fence); MaybeRecordStat( compilation_stats_, @@ -1090,7 +1091,7 @@ HClinitCheck* HInstructionBuilder::ProcessClinitCheckForInvoke( /* needs_access_check */ false); if (cls != nullptr) { *clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit; - clinit_check = new (arena_) HClinitCheck(cls, dex_pc); + clinit_check = new (allocator_) HClinitCheck(cls, dex_pc); AppendInstruction(clinit_check); } } @@ -1290,23 +1291,23 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio if (resolved_field == nullptr) { MaybeRecordStat(compilation_stats_, MethodCompilationStat::kUnresolvedField); - field_set = new (arena_) HUnresolvedInstanceFieldSet(object, - value, - field_type, - field_index, - dex_pc); + field_set = new (allocator_) HUnresolvedInstanceFieldSet(object, + value, + field_type, + field_index, + dex_pc); } else { uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex(); - field_set = new (arena_) HInstanceFieldSet(object, - value, - resolved_field, - field_type, - resolved_field->GetOffset(), - resolved_field->IsVolatile(), - field_index, - class_def_index, - *dex_file_, - dex_pc); + field_set = new (allocator_) HInstanceFieldSet(object, + value, + resolved_field, + field_type, + resolved_field->GetOffset(), + resolved_field->IsVolatile(), + 
field_index, + class_def_index, + *dex_file_, + dex_pc); } AppendInstruction(field_set); } else { @@ -1314,21 +1315,21 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio if (resolved_field == nullptr) { MaybeRecordStat(compilation_stats_, MethodCompilationStat::kUnresolvedField); - field_get = new (arena_) HUnresolvedInstanceFieldGet(object, - field_type, - field_index, - dex_pc); + field_get = new (allocator_) HUnresolvedInstanceFieldGet(object, + field_type, + field_index, + dex_pc); } else { uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex(); - field_get = new (arena_) HInstanceFieldGet(object, - resolved_field, - field_type, - resolved_field->GetOffset(), - resolved_field->IsVolatile(), - field_index, - class_def_index, - *dex_file_, - dex_pc); + field_get = new (allocator_) HInstanceFieldGet(object, + resolved_field, + field_type, + resolved_field->GetOffset(), + resolved_field->IsVolatile(), + field_index, + class_def_index, + *dex_file_, + dex_pc); } AppendInstruction(field_get); UpdateLocal(source_or_dest_reg, field_get); @@ -1382,9 +1383,9 @@ void HInstructionBuilder::BuildUnresolvedStaticFieldAccess(const Instruction& in if (is_put) { HInstruction* value = LoadLocal(source_or_dest_reg, field_type); AppendInstruction( - new (arena_) HUnresolvedStaticFieldSet(value, field_type, field_index, dex_pc)); + new (allocator_) HUnresolvedStaticFieldSet(value, field_type, field_index, dex_pc)); } else { - AppendInstruction(new (arena_) HUnresolvedStaticFieldGet(field_type, field_index, dex_pc)); + AppendInstruction(new (allocator_) HUnresolvedStaticFieldGet(field_type, field_index, dex_pc)); UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction()); } } @@ -1475,7 +1476,7 @@ bool HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction, HInstruction* cls = constant; if (!IsInitialized(klass)) { - cls = new (arena_) HClinitCheck(constant, dex_pc); + cls = new 
(allocator_) HClinitCheck(constant, dex_pc); AppendInstruction(cls); } @@ -1484,38 +1485,38 @@ bool HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction, // We need to keep the class alive before loading the value. HInstruction* value = LoadLocal(source_or_dest_reg, field_type); DCHECK_EQ(HPhi::ToPhiType(value->GetType()), HPhi::ToPhiType(field_type)); - AppendInstruction(new (arena_) HStaticFieldSet(cls, - value, - resolved_field, - field_type, - resolved_field->GetOffset(), - resolved_field->IsVolatile(), - field_index, - class_def_index, - *dex_file_, - dex_pc)); + AppendInstruction(new (allocator_) HStaticFieldSet(cls, + value, + resolved_field, + field_type, + resolved_field->GetOffset(), + resolved_field->IsVolatile(), + field_index, + class_def_index, + *dex_file_, + dex_pc)); } else { - AppendInstruction(new (arena_) HStaticFieldGet(cls, - resolved_field, - field_type, - resolved_field->GetOffset(), - resolved_field->IsVolatile(), - field_index, - class_def_index, - *dex_file_, - dex_pc)); + AppendInstruction(new (allocator_) HStaticFieldGet(cls, + resolved_field, + field_type, + resolved_field->GetOffset(), + resolved_field->IsVolatile(), + field_index, + class_def_index, + *dex_file_, + dex_pc)); UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction()); } return true; } void HInstructionBuilder::BuildCheckedDivRem(uint16_t out_vreg, - uint16_t first_vreg, - int64_t second_vreg_or_constant, - uint32_t dex_pc, - DataType::Type type, - bool second_is_constant, - bool isDiv) { + uint16_t first_vreg, + int64_t second_vreg_or_constant, + uint32_t dex_pc, + DataType::Type type, + bool second_is_constant, + bool isDiv) { DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64); HInstruction* first = LoadLocal(first_vreg, type); @@ -1533,14 +1534,14 @@ void HInstructionBuilder::BuildCheckedDivRem(uint16_t out_vreg, if (!second_is_constant || (type == DataType::Type::kInt32 && second->AsIntConstant()->GetValue() == 
0) || (type == DataType::Type::kInt64 && second->AsLongConstant()->GetValue() == 0)) { - second = new (arena_) HDivZeroCheck(second, dex_pc); + second = new (allocator_) HDivZeroCheck(second, dex_pc); AppendInstruction(second); } if (isDiv) { - AppendInstruction(new (arena_) HDiv(type, first, second, dex_pc)); + AppendInstruction(new (allocator_) HDiv(type, first, second, dex_pc)); } else { - AppendInstruction(new (arena_) HRem(type, first, second, dex_pc)); + AppendInstruction(new (allocator_) HRem(type, first, second, dex_pc)); } UpdateLocal(out_vreg, current_block_->GetLastInstruction()); } @@ -1554,19 +1555,19 @@ void HInstructionBuilder::BuildArrayAccess(const Instruction& instruction, uint8_t index_reg = instruction.VRegC_23x(); HInstruction* object = LoadNullCheckedLocal(array_reg, dex_pc); - HInstruction* length = new (arena_) HArrayLength(object, dex_pc); + HInstruction* length = new (allocator_) HArrayLength(object, dex_pc); AppendInstruction(length); HInstruction* index = LoadLocal(index_reg, DataType::Type::kInt32); - index = new (arena_) HBoundsCheck(index, length, dex_pc); + index = new (allocator_) HBoundsCheck(index, length, dex_pc); AppendInstruction(index); if (is_put) { HInstruction* value = LoadLocal(source_or_dest_reg, anticipated_type); // TODO: Insert a type check node if the type is Object. 
- HArraySet* aset = new (arena_) HArraySet(object, index, value, anticipated_type, dex_pc); + HArraySet* aset = new (allocator_) HArraySet(object, index, value, anticipated_type, dex_pc); ssa_builder_->MaybeAddAmbiguousArraySet(aset); AppendInstruction(aset); } else { - HArrayGet* aget = new (arena_) HArrayGet(object, index, anticipated_type, dex_pc); + HArrayGet* aget = new (allocator_) HArrayGet(object, index, anticipated_type, dex_pc); ssa_builder_->MaybeAddAmbiguousArrayGet(aget); AppendInstruction(aget); UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction()); @@ -1582,7 +1583,7 @@ HNewArray* HInstructionBuilder::BuildFilledNewArray(uint32_t dex_pc, uint32_t register_index) { HInstruction* length = graph_->GetIntConstant(number_of_vreg_arguments, dex_pc); HLoadClass* cls = BuildLoadClass(type_index, dex_pc); - HNewArray* const object = new (arena_) HNewArray(cls, length, dex_pc); + HNewArray* const object = new (allocator_) HNewArray(cls, length, dex_pc); AppendInstruction(object); const char* descriptor = dex_file_->StringByTypeIdx(type_index); @@ -1597,7 +1598,7 @@ HNewArray* HInstructionBuilder::BuildFilledNewArray(uint32_t dex_pc, for (size_t i = 0; i < number_of_vreg_arguments; ++i) { HInstruction* value = LoadLocal(is_range ? 
register_index + i : args[i], type); HInstruction* index = graph_->GetIntConstant(i, dex_pc); - HArraySet* aset = new (arena_) HArraySet(object, index, value, type, dex_pc); + HArraySet* aset = new (allocator_) HArraySet(object, index, value, type, dex_pc); ssa_builder_->MaybeAddAmbiguousArraySet(aset); AppendInstruction(aset); } @@ -1615,7 +1616,7 @@ void HInstructionBuilder::BuildFillArrayData(HInstruction* object, for (uint32_t i = 0; i < element_count; ++i) { HInstruction* index = graph_->GetIntConstant(i, dex_pc); HInstruction* value = graph_->GetIntConstant(data[i], dex_pc); - HArraySet* aset = new (arena_) HArraySet(object, index, value, anticipated_type, dex_pc); + HArraySet* aset = new (allocator_) HArraySet(object, index, value, anticipated_type, dex_pc); ssa_builder_->MaybeAddAmbiguousArraySet(aset); AppendInstruction(aset); } @@ -1635,13 +1636,13 @@ void HInstructionBuilder::BuildFillArrayData(const Instruction& instruction, uin return; } - HInstruction* length = new (arena_) HArrayLength(array, dex_pc); + HInstruction* length = new (allocator_) HArrayLength(array, dex_pc); AppendInstruction(length); // Implementation of this DEX instruction seems to be that the bounds check is // done before doing any stores. 
HInstruction* last_index = graph_->GetIntConstant(payload->element_count - 1, dex_pc); - AppendInstruction(new (arena_) HBoundsCheck(last_index, length, dex_pc)); + AppendInstruction(new (allocator_) HBoundsCheck(last_index, length, dex_pc)); switch (payload->element_width) { case 1: @@ -1684,7 +1685,8 @@ void HInstructionBuilder::BuildFillWideArrayData(HInstruction* object, for (uint32_t i = 0; i < element_count; ++i) { HInstruction* index = graph_->GetIntConstant(i, dex_pc); HInstruction* value = graph_->GetLongConstant(data[i], dex_pc); - HArraySet* aset = new (arena_) HArraySet(object, index, value, DataType::Type::kInt64, dex_pc); + HArraySet* aset = + new (allocator_) HArraySet(object, index, value, DataType::Type::kInt64, dex_pc); ssa_builder_->MaybeAddAmbiguousArraySet(aset); AppendInstruction(aset); } @@ -1752,7 +1754,7 @@ HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index, } // Note: `klass` must be from `handles_`. - HLoadClass* load_class = new (arena_) HLoadClass( + HLoadClass* load_class = new (allocator_) HLoadClass( graph_->GetCurrentMethod(), type_index, *actual_dex_file, @@ -1787,15 +1789,15 @@ void HInstructionBuilder::BuildTypeCheck(const Instruction& instruction, ScopedObjectAccess soa(Thread::Current()); TypeCheckKind check_kind = ComputeTypeCheckKind(cls->GetClass()); if (instruction.Opcode() == Instruction::INSTANCE_OF) { - AppendInstruction(new (arena_) HInstanceOf(object, cls, check_kind, dex_pc)); + AppendInstruction(new (allocator_) HInstanceOf(object, cls, check_kind, dex_pc)); UpdateLocal(destination, current_block_->GetLastInstruction()); } else { DCHECK_EQ(instruction.Opcode(), Instruction::CHECK_CAST); // We emit a CheckCast followed by a BoundType. CheckCast is a statement // which may throw. If it succeeds BoundType sets the new type of `object` // for all subsequent uses. 
- AppendInstruction(new (arena_) HCheckCast(object, cls, check_kind, dex_pc)); - AppendInstruction(new (arena_) HBoundType(object, dex_pc)); + AppendInstruction(new (allocator_) HCheckCast(object, cls, check_kind, dex_pc)); + AppendInstruction(new (allocator_) HBoundType(object, dex_pc)); UpdateLocal(reference, current_block_->GetLastInstruction()); } } @@ -1943,7 +1945,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction, case Instruction::GOTO: case Instruction::GOTO_16: case Instruction::GOTO_32: { - AppendInstruction(new (arena_) HGoto(dex_pc)); + AppendInstruction(new (allocator_) HGoto(dex_pc)); current_block_ = nullptr; break; } @@ -2580,7 +2582,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction, HInstruction* length = LoadLocal(instruction.VRegB_22c(), DataType::Type::kInt32); HLoadClass* cls = BuildLoadClass(type_index, dex_pc); - HNewArray* new_array = new (arena_) HNewArray(cls, length, dex_pc); + HNewArray* new_array = new (allocator_) HNewArray(cls, length, dex_pc); AppendInstruction(new_array); UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction()); BuildConstructorFenceForAllocation(new_array); @@ -2744,23 +2746,27 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction, case Instruction::ARRAY_LENGTH: { HInstruction* object = LoadNullCheckedLocal(instruction.VRegB_12x(), dex_pc); - AppendInstruction(new (arena_) HArrayLength(object, dex_pc)); + AppendInstruction(new (allocator_) HArrayLength(object, dex_pc)); UpdateLocal(instruction.VRegA_12x(), current_block_->GetLastInstruction()); break; } case Instruction::CONST_STRING: { dex::StringIndex string_index(instruction.VRegB_21c()); - AppendInstruction( - new (arena_) HLoadString(graph_->GetCurrentMethod(), string_index, *dex_file_, dex_pc)); + AppendInstruction(new (allocator_) HLoadString(graph_->GetCurrentMethod(), + string_index, + *dex_file_, + dex_pc)); 
UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction()); break; } case Instruction::CONST_STRING_JUMBO: { dex::StringIndex string_index(instruction.VRegB_31c()); - AppendInstruction( - new (arena_) HLoadString(graph_->GetCurrentMethod(), string_index, *dex_file_, dex_pc)); + AppendInstruction(new (allocator_) HLoadString(graph_->GetCurrentMethod(), + string_index, + *dex_file_, + dex_pc)); UpdateLocal(instruction.VRegA_31c(), current_block_->GetLastInstruction()); break; } @@ -2773,15 +2779,15 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction, } case Instruction::MOVE_EXCEPTION: { - AppendInstruction(new (arena_) HLoadException(dex_pc)); + AppendInstruction(new (allocator_) HLoadException(dex_pc)); UpdateLocal(instruction.VRegA_11x(), current_block_->GetLastInstruction()); - AppendInstruction(new (arena_) HClearException(dex_pc)); + AppendInstruction(new (allocator_) HClearException(dex_pc)); break; } case Instruction::THROW: { HInstruction* exception = LoadLocal(instruction.VRegA_11x(), DataType::Type::kReference); - AppendInstruction(new (arena_) HThrow(exception, dex_pc)); + AppendInstruction(new (allocator_) HThrow(exception, dex_pc)); // We finished building this block. Set the current block to null to avoid // adding dead instructions to it. 
current_block_ = nullptr; @@ -2804,7 +2810,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction, } case Instruction::MONITOR_ENTER: { - AppendInstruction(new (arena_) HMonitorOperation( + AppendInstruction(new (allocator_) HMonitorOperation( LoadLocal(instruction.VRegA_11x(), DataType::Type::kReference), HMonitorOperation::OperationKind::kEnter, dex_pc)); @@ -2812,7 +2818,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction, } case Instruction::MONITOR_EXIT: { - AppendInstruction(new (arena_) HMonitorOperation( + AppendInstruction(new (allocator_) HMonitorOperation( LoadLocal(instruction.VRegA_11x(), DataType::Type::kReference), HMonitorOperation::OperationKind::kExit, dex_pc)); diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h index a684bf40e6..79d6ddc87d 100644 --- a/compiler/optimizing/instruction_builder.h +++ b/compiler/optimizing/instruction_builder.h @@ -43,15 +43,15 @@ class HInstructionBuilder : public ValueObject { const DexFile* dex_file, const DexFile::CodeItem& code_item, DataType::Type return_type, - DexCompilationUnit* dex_compilation_unit, - const DexCompilationUnit* const outer_compilation_unit, + const DexCompilationUnit* dex_compilation_unit, + const DexCompilationUnit* outer_compilation_unit, CompilerDriver* driver, CodeGenerator* code_generator, const uint8_t* interpreter_metadata, OptimizingCompilerStats* compiler_stats, Handle<mirror::DexCache> dex_cache, VariableSizedHandleScope* handles) - : arena_(graph->GetArena()), + : allocator_(graph->GetAllocator()), graph_(graph), handles_(handles), dex_file_(dex_file), @@ -59,7 +59,7 @@ class HInstructionBuilder : public ValueObject { return_type_(return_type), block_builder_(block_builder), ssa_builder_(ssa_builder), - locals_for_(arena_->Adapter(kArenaAllocGraphBuilder)), + locals_for_(allocator_->Adapter(kArenaAllocGraphBuilder)), current_block_(nullptr), current_locals_(nullptr), 
latest_result_(nullptr), @@ -71,7 +71,7 @@ class HInstructionBuilder : public ValueObject { quicken_info_(interpreter_metadata), compilation_stats_(compiler_stats), dex_cache_(dex_cache), - loop_headers_(graph->GetArena()->Adapter(kArenaAllocGraphBuilder)) { + loop_headers_(allocator_->Adapter(kArenaAllocGraphBuilder)) { loop_headers_.reserve(kDefaultNumberOfLoops); } @@ -312,7 +312,7 @@ class HInstructionBuilder : public ValueObject { ObjPtr<mirror::Class> LookupReferrerClass() const REQUIRES_SHARED(Locks::mutator_lock_); - ArenaAllocator* const arena_; + ArenaAllocator* const allocator_; HGraph* const graph_; VariableSizedHandleScope* handles_; @@ -342,7 +342,7 @@ class HInstructionBuilder : public ValueObject { // The compilation unit of the current method being compiled. Note that // it can be an inlined method. - DexCompilationUnit* const dex_compilation_unit_; + const DexCompilationUnit* const dex_compilation_unit_; // The compilation unit of the outermost method being compiled. That is the // method being compiled (and not inlined), and potentially inlining other diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc index 6610bcc713..f39acab3d7 100644 --- a/compiler/optimizing/instruction_simplifier.cc +++ b/compiler/optimizing/instruction_simplifier.cc @@ -186,7 +186,7 @@ bool InstructionSimplifierVisitor::TryMoveNegOnInputsAfterBinop(HBinaryOperation binop->ReplaceInput(right_neg->GetInput(), 1); left_neg->GetBlock()->RemoveInstruction(left_neg); right_neg->GetBlock()->RemoveInstruction(right_neg); - HNeg* neg = new (GetGraph()->GetArena()) HNeg(binop->GetType(), binop); + HNeg* neg = new (GetGraph()->GetAllocator()) HNeg(binop->GetType(), binop); binop->GetBlock()->InsertInstructionBefore(neg, binop->GetNext()); binop->ReplaceWithExceptInReplacementAtIndex(neg, 0); RecordSimplification(); @@ -225,15 +225,15 @@ bool InstructionSimplifierVisitor::TryDeMorganNegationFactoring(HBinaryOperation // Replace 
the `HAnd` or `HOr`. HBinaryOperation* hbin; if (op->IsAnd()) { - hbin = new (GetGraph()->GetArena()) HOr(type, src_left, src_right, dex_pc); + hbin = new (GetGraph()->GetAllocator()) HOr(type, src_left, src_right, dex_pc); } else { - hbin = new (GetGraph()->GetArena()) HAnd(type, src_left, src_right, dex_pc); + hbin = new (GetGraph()->GetAllocator()) HAnd(type, src_left, src_right, dex_pc); } HInstruction* hnot; if (left->IsBooleanNot()) { - hnot = new (GetGraph()->GetArena()) HBooleanNot(hbin, dex_pc); + hnot = new (GetGraph()->GetAllocator()) HBooleanNot(hbin, dex_pc); } else { - hnot = new (GetGraph()->GetArena()) HNot(type, hbin, dex_pc); + hnot = new (GetGraph()->GetAllocator()) HNot(type, hbin, dex_pc); } op->GetBlock()->InsertInstructionBefore(hbin, op); @@ -274,7 +274,7 @@ bool InstructionSimplifierVisitor::TryCombineVecMultiplyAccumulate(HVecMul* mul) return false; } - ArenaAllocator* arena = mul->GetBlock()->GetGraph()->GetArena(); + ArenaAllocator* allocator = mul->GetBlock()->GetGraph()->GetAllocator(); if (mul->HasOnlyOneNonEnvironmentUse()) { HInstruction* use = mul->GetUses().front().GetUser(); @@ -307,14 +307,14 @@ bool InstructionSimplifierVisitor::TryCombineVecMultiplyAccumulate(HVecMul* mul) use->IsVecAdd() ? 
HInstruction::kAdd : HInstruction::kSub; if (accumulator != nullptr) { HVecMultiplyAccumulate* mulacc = - new (arena) HVecMultiplyAccumulate(arena, - kind, - accumulator, - mul->GetLeft(), - mul->GetRight(), - binop->GetPackedType(), - binop->GetVectorLength(), - binop->GetDexPc()); + new (allocator) HVecMultiplyAccumulate(allocator, + kind, + accumulator, + mul->GetLeft(), + mul->GetRight(), + binop->GetPackedType(), + binop->GetVectorLength(), + binop->GetDexPc()); binop->GetBlock()->ReplaceAndRemoveInstructionWith(binop, mulacc); DCHECK(!mul->HasUses()); @@ -407,7 +407,8 @@ bool InstructionSimplifierVisitor::ReplaceRotateWithRor(HBinaryOperation* op, HUShr* ushr, HShl* shl) { DCHECK(op->IsAdd() || op->IsXor() || op->IsOr()) << op->DebugName(); - HRor* ror = new (GetGraph()->GetArena()) HRor(ushr->GetType(), ushr->GetLeft(), ushr->GetRight()); + HRor* ror = + new (GetGraph()->GetAllocator()) HRor(ushr->GetType(), ushr->GetLeft(), ushr->GetRight()); op->GetBlock()->ReplaceAndRemoveInstructionWith(op, ror); if (!ushr->HasUses()) { ushr->GetBlock()->RemoveInstruction(ushr); @@ -667,7 +668,7 @@ void InstructionSimplifierVisitor::VisitInstanceOf(HInstanceOf* instruction) { MaybeRecordStat(stats_, kRemovedInstanceOf); if (outcome && can_be_null) { // Type test will succeed, we just need a null test. 
- HNotEqual* test = new (graph->GetArena()) HNotEqual(graph->GetNullConstant(), object); + HNotEqual* test = new (graph->GetAllocator()) HNotEqual(graph->GetNullConstant(), object); instruction->GetBlock()->InsertInstructionBefore(test, instruction); instruction->ReplaceWith(test); } else { @@ -699,30 +700,30 @@ void InstructionSimplifierVisitor::VisitStaticFieldSet(HStaticFieldSet* instruct } } -static HCondition* GetOppositeConditionSwapOps(ArenaAllocator* arena, HInstruction* cond) { +static HCondition* GetOppositeConditionSwapOps(ArenaAllocator* allocator, HInstruction* cond) { HInstruction *lhs = cond->InputAt(0); HInstruction *rhs = cond->InputAt(1); switch (cond->GetKind()) { case HInstruction::kEqual: - return new (arena) HEqual(rhs, lhs); + return new (allocator) HEqual(rhs, lhs); case HInstruction::kNotEqual: - return new (arena) HNotEqual(rhs, lhs); + return new (allocator) HNotEqual(rhs, lhs); case HInstruction::kLessThan: - return new (arena) HGreaterThan(rhs, lhs); + return new (allocator) HGreaterThan(rhs, lhs); case HInstruction::kLessThanOrEqual: - return new (arena) HGreaterThanOrEqual(rhs, lhs); + return new (allocator) HGreaterThanOrEqual(rhs, lhs); case HInstruction::kGreaterThan: - return new (arena) HLessThan(rhs, lhs); + return new (allocator) HLessThan(rhs, lhs); case HInstruction::kGreaterThanOrEqual: - return new (arena) HLessThanOrEqual(rhs, lhs); + return new (allocator) HLessThanOrEqual(rhs, lhs); case HInstruction::kBelow: - return new (arena) HAbove(rhs, lhs); + return new (allocator) HAbove(rhs, lhs); case HInstruction::kBelowOrEqual: - return new (arena) HAboveOrEqual(rhs, lhs); + return new (allocator) HAboveOrEqual(rhs, lhs); case HInstruction::kAbove: - return new (arena) HBelow(rhs, lhs); + return new (allocator) HBelow(rhs, lhs); case HInstruction::kAboveOrEqual: - return new (arena) HBelowOrEqual(rhs, lhs); + return new (allocator) HBelowOrEqual(rhs, lhs); default: LOG(FATAL) << "Unknown ConditionType " << cond->GetKind(); } 
@@ -836,7 +837,9 @@ void InstructionSimplifierVisitor::VisitBooleanNot(HBooleanNot* bool_not) { } // Constructs a new ABS(x) node in the HIR. -static HInstruction* NewIntegralAbs(ArenaAllocator* arena, HInstruction* x, HInstruction* cursor) { +static HInstruction* NewIntegralAbs(ArenaAllocator* allocator, + HInstruction* x, + HInstruction* cursor) { DataType::Type type = x->GetType(); DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64); // Construct a fake intrinsic with as much context as is needed to allocate one. @@ -847,8 +850,8 @@ static HInstruction* NewIntegralAbs(ArenaAllocator* arena, HInstruction* x, HIns HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod, 0u }; - HInvokeStaticOrDirect* invoke = new (arena) HInvokeStaticOrDirect( - arena, + HInvokeStaticOrDirect* invoke = new (allocator) HInvokeStaticOrDirect( + allocator, 1, type, x->GetDexPc(), @@ -939,14 +942,14 @@ void InstructionSimplifierVisitor::VisitSelect(HSelect* select) { if ((cmp == kCondLT || cmp == kCondLE) && (a == negated && a == false_value && IsInt64Value(b, 0))) { // Found a < 0 ? -a : a which can be replaced by ABS(a). - replace_with = NewIntegralAbs(GetGraph()->GetArena(), false_value, select); + replace_with = NewIntegralAbs(GetGraph()->GetAllocator(), false_value, select); } } else if (false_value->IsNeg()) { HInstruction* negated = false_value->InputAt(0); if ((cmp == kCondGT || cmp == kCondGE) && (a == true_value && a == negated && IsInt64Value(b, 0))) { // Found a > 0 ? a : -a which can be replaced by ABS(a). - replace_with = NewIntegralAbs(GetGraph()->GetArena(), true_value, select); + replace_with = NewIntegralAbs(GetGraph()->GetAllocator(), true_value, select); } } else if (true_value->IsSub() && false_value->IsSub()) { HInstruction* true_sub1 = true_value->InputAt(0); @@ -961,7 +964,7 @@ void InstructionSimplifierVisitor::VisitSelect(HSelect* select) { // Found a > b ? a - b : b - a or // a < b ? 
b - a : a - b // which can be replaced by ABS(a - b) for lower precision operands a, b. - replace_with = NewIntegralAbs(GetGraph()->GetArena(), true_value, select); + replace_with = NewIntegralAbs(GetGraph()->GetAllocator(), true_value, select); } } } @@ -1173,7 +1176,8 @@ void InstructionSimplifierVisitor::VisitAdd(HAdd* instruction) { // particular, we do not want the live range of `b` to be extended if we are // not sure the initial 'NEG' instruction can be removed. HInstruction* other = left_is_neg ? right : left; - HSub* sub = new(GetGraph()->GetArena()) HSub(instruction->GetType(), other, neg->GetInput()); + HSub* sub = + new(GetGraph()->GetAllocator()) HSub(instruction->GetType(), other, neg->GetInput()); instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, sub); RecordSimplification(); neg->GetBlock()->RemoveInstruction(neg); @@ -1251,10 +1255,10 @@ void InstructionSimplifierVisitor::VisitAnd(HAnd* instruction) { DCHECK_NE(new_and_input->GetType(), DataType::Type::kInt64); HConstant* new_const = GetGraph()->GetConstant(DataType::Type::kInt32, value); HAnd* new_and = - new (GetGraph()->GetArena()) HAnd(DataType::Type::kInt32, new_and_input, new_const); + new (GetGraph()->GetAllocator()) HAnd(DataType::Type::kInt32, new_and_input, new_const); instruction->GetBlock()->InsertInstructionBefore(new_and, instruction); HTypeConversion* new_conversion = - new (GetGraph()->GetArena()) HTypeConversion(DataType::Type::kInt64, new_and); + new (GetGraph()->GetAllocator()) HTypeConversion(DataType::Type::kInt64, new_and); instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, new_conversion); input_other->GetBlock()->RemoveInstruction(input_other); RecordSimplification(); @@ -1279,7 +1283,7 @@ void InstructionSimplifierVisitor::VisitAnd(HAnd* instruction) { input_other->HasOnlyOneNonEnvironmentUse()) { DCHECK(input_other->IsShr()); // For UShr, we would have taken the branch above. 
// Replace SHR+AND with USHR, for example "(x >> 24) & 0xff" -> "x >>> 24". - HUShr* ushr = new (GetGraph()->GetArena()) HUShr(instruction->GetType(), + HUShr* ushr = new (GetGraph()->GetAllocator()) HUShr(instruction->GetType(), input_other->InputAt(0), input_other->InputAt(1), input_other->GetDexPc()); @@ -1410,7 +1414,8 @@ void InstructionSimplifierVisitor::VisitCondition(HCondition* condition) { // on the right hand side. if (condition->GetLeft()->IsConstant() && !condition->GetRight()->IsConstant()) { HBasicBlock* block = condition->GetBlock(); - HCondition* replacement = GetOppositeConditionSwapOps(block->GetGraph()->GetArena(), condition); + HCondition* replacement = + GetOppositeConditionSwapOps(block->GetGraph()->GetAllocator(), condition); // If it is a fp we must set the opposite bias. if (replacement != nullptr) { if (condition->IsLtBias()) { @@ -1506,7 +1511,7 @@ void InstructionSimplifierVisitor::VisitDiv(HDiv* instruction) { // with // NEG dst, src instruction->GetBlock()->ReplaceAndRemoveInstructionWith( - instruction, new (GetGraph()->GetArena()) HNeg(type, input_other)); + instruction, new (GetGraph()->GetAllocator()) HNeg(type, input_other)); RecordSimplification(); return; } @@ -1532,7 +1537,7 @@ void InstructionSimplifierVisitor::VisitDiv(HDiv* instruction) { if (reciprocal != nullptr) { instruction->GetBlock()->ReplaceAndRemoveInstructionWith( - instruction, new (GetGraph()->GetArena()) HMul(type, input_other, reciprocal)); + instruction, new (GetGraph()->GetAllocator()) HMul(type, input_other, reciprocal)); RecordSimplification(); return; } @@ -1544,7 +1549,7 @@ void InstructionSimplifierVisitor::VisitMul(HMul* instruction) { HInstruction* input_other = instruction->GetLeastConstantLeft(); DataType::Type type = instruction->GetType(); HBasicBlock* block = instruction->GetBlock(); - ArenaAllocator* allocator = GetGraph()->GetArena(); + ArenaAllocator* allocator = GetGraph()->GetAllocator(); if (input_cst == nullptr) { return; @@ -1683,8 
+1688,8 @@ void InstructionSimplifierVisitor::VisitNeg(HNeg* instruction) { // removed. // We do not perform optimization for fp because we could lose the sign of zero. HSub* sub = input->AsSub(); - HSub* new_sub = - new (GetGraph()->GetArena()) HSub(instruction->GetType(), sub->GetRight(), sub->GetLeft()); + HSub* new_sub = new (GetGraph()->GetAllocator()) HSub( + instruction->GetType(), sub->GetRight(), sub->GetLeft()); instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, new_sub); if (!sub->HasUses()) { sub->GetBlock()->RemoveInstruction(sub); @@ -1786,7 +1791,7 @@ void InstructionSimplifierVisitor::VisitSub(HSub* instruction) { } HBasicBlock* block = instruction->GetBlock(); - ArenaAllocator* allocator = GetGraph()->GetArena(); + ArenaAllocator* allocator = GetGraph()->GetAllocator(); HInstruction* left = instruction->GetLeft(); HInstruction* right = instruction->GetRight(); @@ -1818,7 +1823,7 @@ void InstructionSimplifierVisitor::VisitSub(HSub* instruction) { // SUB dst, a, tmp // with // ADD dst, a, b - HAdd* add = new(GetGraph()->GetArena()) HAdd(type, left, right->AsNeg()->GetInput()); + HAdd* add = new(GetGraph()->GetAllocator()) HAdd(type, left, right->AsNeg()->GetInput()); instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, add); RecordSimplification(); right->GetBlock()->RemoveInstruction(right); @@ -1834,9 +1839,9 @@ void InstructionSimplifierVisitor::VisitSub(HSub* instruction) { // NEG dst, tmp // The second version is not intrinsically better, but enables more // transformations. 
- HAdd* add = new(GetGraph()->GetArena()) HAdd(type, left->AsNeg()->GetInput(), right); + HAdd* add = new(GetGraph()->GetAllocator()) HAdd(type, left->AsNeg()->GetInput(), right); instruction->GetBlock()->InsertInstructionBefore(add, instruction); - HNeg* neg = new (GetGraph()->GetArena()) HNeg(instruction->GetType(), add); + HNeg* neg = new (GetGraph()->GetAllocator()) HNeg(instruction->GetType(), add); instruction->GetBlock()->InsertInstructionBefore(neg, instruction); instruction->ReplaceWith(neg); instruction->GetBlock()->RemoveInstruction(instruction); @@ -1898,7 +1903,7 @@ void InstructionSimplifierVisitor::VisitXor(HXor* instruction) { // XOR dst, src, 1 // with // BOOLEAN_NOT dst, src - HBooleanNot* boolean_not = new (GetGraph()->GetArena()) HBooleanNot(input_other); + HBooleanNot* boolean_not = new (GetGraph()->GetAllocator()) HBooleanNot(input_other); instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, boolean_not); RecordSimplification(); return; @@ -1909,7 +1914,7 @@ void InstructionSimplifierVisitor::VisitXor(HXor* instruction) { // XOR dst, src, 0xFFF...FF // with // NOT dst, src - HNot* bitwise_not = new (GetGraph()->GetArena()) HNot(instruction->GetType(), input_other); + HNot* bitwise_not = new (GetGraph()->GetAllocator()) HNot(instruction->GetType(), input_other); instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, bitwise_not); RecordSimplification(); return; @@ -1980,10 +1985,10 @@ void InstructionSimplifierVisitor::SimplifyRotate(HInvoke* invoke, // Unconditionally set the type of the negated distance to `int`, // as shift and rotate operations expect a 32-bit (or narrower) // value for their distance input. 
- distance = new (GetGraph()->GetArena()) HNeg(DataType::Type::kInt32, distance); + distance = new (GetGraph()->GetAllocator()) HNeg(DataType::Type::kInt32, distance); invoke->GetBlock()->InsertInstructionBefore(distance, invoke); } - HRor* ror = new (GetGraph()->GetArena()) HRor(type, value, distance); + HRor* ror = new (GetGraph()->GetAllocator()) HRor(type, value, distance); invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, ror); // Remove ClinitCheck and LoadClass, if possible. HInstruction* clinit = invoke->GetInputs().back(); @@ -2127,7 +2132,7 @@ void InstructionSimplifierVisitor::SimplifyCompare(HInvoke* invoke, } else { right = GetGraph()->GetIntConstant(0); } - HCompare* compare = new (GetGraph()->GetArena()) + HCompare* compare = new (GetGraph()->GetAllocator()) HCompare(type, left, right, ComparisonBias::kNoBias, dex_pc); invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, compare); } @@ -2137,7 +2142,7 @@ void InstructionSimplifierVisitor::SimplifyIsNaN(HInvoke* invoke) { uint32_t dex_pc = invoke->GetDexPc(); // IsNaN(x) is the same as x != x. HInstruction* x = invoke->InputAt(0); - HCondition* condition = new (GetGraph()->GetArena()) HNotEqual(x, x, dex_pc); + HCondition* condition = new (GetGraph()->GetAllocator()) HNotEqual(x, x, dex_pc); condition->SetBias(ComparisonBias::kLtBias); invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, condition); } @@ -2164,11 +2169,11 @@ void InstructionSimplifierVisitor::SimplifyFP2Int(HInvoke* invoke) { kNoThrow); } // Test IsNaN(x), which is the same as x != x. - HCondition* condition = new (GetGraph()->GetArena()) HNotEqual(x, x, dex_pc); + HCondition* condition = new (GetGraph()->GetAllocator()) HNotEqual(x, x, dex_pc); condition->SetBias(ComparisonBias::kLtBias); invoke->GetBlock()->InsertInstructionBefore(condition, invoke->GetNext()); // Select between the two. 
- HInstruction* select = new (GetGraph()->GetArena()) HSelect(condition, nan, invoke, dex_pc); + HInstruction* select = new (GetGraph()->GetAllocator()) HSelect(condition, nan, invoke, dex_pc); invoke->GetBlock()->InsertInstructionBefore(select, condition->GetNext()); invoke->ReplaceWithExceptInReplacementAtIndex(select, 0); // false at index 0 } @@ -2177,20 +2182,20 @@ void InstructionSimplifierVisitor::SimplifyStringCharAt(HInvoke* invoke) { HInstruction* str = invoke->InputAt(0); HInstruction* index = invoke->InputAt(1); uint32_t dex_pc = invoke->GetDexPc(); - ArenaAllocator* arena = GetGraph()->GetArena(); + ArenaAllocator* allocator = GetGraph()->GetAllocator(); // We treat String as an array to allow DCE and BCE to seamlessly work on strings, // so create the HArrayLength, HBoundsCheck and HArrayGet. - HArrayLength* length = new (arena) HArrayLength(str, dex_pc, /* is_string_length */ true); + HArrayLength* length = new (allocator) HArrayLength(str, dex_pc, /* is_string_length */ true); invoke->GetBlock()->InsertInstructionBefore(length, invoke); - HBoundsCheck* bounds_check = new (arena) HBoundsCheck( + HBoundsCheck* bounds_check = new (allocator) HBoundsCheck( index, length, dex_pc, invoke->GetDexMethodIndex()); invoke->GetBlock()->InsertInstructionBefore(bounds_check, invoke); - HArrayGet* array_get = new (arena) HArrayGet(str, - bounds_check, - DataType::Type::kUint16, - SideEffects::None(), // Strings are immutable. - dex_pc, - /* is_string_char_at */ true); + HArrayGet* array_get = new (allocator) HArrayGet(str, + bounds_check, + DataType::Type::kUint16, + SideEffects::None(), // Strings are immutable. 
+ dex_pc, + /* is_string_char_at */ true); invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, array_get); bounds_check->CopyEnvironmentFrom(invoke->GetEnvironment()); GetGraph()->SetHasBoundsChecks(true); @@ -2202,13 +2207,13 @@ void InstructionSimplifierVisitor::SimplifyStringIsEmptyOrLength(HInvoke* invoke // We treat String as an array to allow DCE and BCE to seamlessly work on strings, // so create the HArrayLength. HArrayLength* length = - new (GetGraph()->GetArena()) HArrayLength(str, dex_pc, /* is_string_length */ true); + new (GetGraph()->GetAllocator()) HArrayLength(str, dex_pc, /* is_string_length */ true); HInstruction* replacement; if (invoke->GetIntrinsic() == Intrinsics::kStringIsEmpty) { // For String.isEmpty(), create the `HEqual` representing the `length == 0`. invoke->GetBlock()->InsertInstructionBefore(length, invoke); HIntConstant* zero = GetGraph()->GetIntConstant(0); - HEqual* equal = new (GetGraph()->GetArena()) HEqual(length, zero, dex_pc); + HEqual* equal = new (GetGraph()->GetAllocator()) HEqual(length, zero, dex_pc); replacement = equal; } else { DCHECK_EQ(invoke->GetIntrinsic(), Intrinsics::kStringLength); @@ -2278,9 +2283,11 @@ void InstructionSimplifierVisitor::SimplifyAllocationIntrinsic(HInvoke* invoke) } } -void InstructionSimplifierVisitor::SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind) { +void InstructionSimplifierVisitor::SimplifyMemBarrier(HInvoke* invoke, + MemBarrierKind barrier_kind) { uint32_t dex_pc = invoke->GetDexPc(); - HMemoryBarrier* mem_barrier = new (GetGraph()->GetArena()) HMemoryBarrier(barrier_kind, dex_pc); + HMemoryBarrier* mem_barrier = + new (GetGraph()->GetAllocator()) HMemoryBarrier(barrier_kind, dex_pc); invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, mem_barrier); } @@ -2519,13 +2526,13 @@ bool InstructionSimplifierVisitor::TrySubtractionChainSimplification( int64_t const3_val = ComputeAddition(type, const1_val, const2_val); HBasicBlock* block = 
instruction->GetBlock(); HConstant* const3 = block->GetGraph()->GetConstant(type, const3_val); - ArenaAllocator* arena = instruction->GetArena(); + ArenaAllocator* allocator = instruction->GetAllocator(); HInstruction* z; if (is_x_negated) { - z = new (arena) HSub(type, const3, x, instruction->GetDexPc()); + z = new (allocator) HSub(type, const3, x, instruction->GetDexPc()); } else { - z = new (arena) HAdd(type, x, const3, instruction->GetDexPc()); + z = new (allocator) HAdd(type, x, const3, instruction->GetDexPc()); } block->ReplaceAndRemoveInstructionWith(instruction, z); diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc index 7439893787..9422f9f30c 100644 --- a/compiler/optimizing/instruction_simplifier_arm.cc +++ b/compiler/optimizing/instruction_simplifier_arm.cc @@ -137,12 +137,12 @@ bool InstructionSimplifierArmVisitor::TryMergeIntoShifterOperand(HInstruction* u if (do_merge) { HDataProcWithShifterOp* alu_with_op = - new (GetGraph()->GetArena()) HDataProcWithShifterOp(use, - other_input, - bitfield_op->InputAt(0), - op_kind, - shift_amount, - use->GetDexPc()); + new (GetGraph()->GetAllocator()) HDataProcWithShifterOp(use, + other_input, + bitfield_op->InputAt(0), + op_kind, + shift_amount, + use->GetDexPc()); use->GetBlock()->ReplaceAndRemoveInstructionWith(use, alu_with_op); if (bitfield_op->GetUses().empty()) { bitfield_op->GetBlock()->RemoveInstruction(bitfield_op); diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc index c639953536..c0ab68fec2 100644 --- a/compiler/optimizing/instruction_simplifier_arm64.cc +++ b/compiler/optimizing/instruction_simplifier_arm64.cc @@ -141,12 +141,12 @@ bool InstructionSimplifierArm64Visitor::TryMergeIntoShifterOperand(HInstruction* if (do_merge) { HDataProcWithShifterOp* alu_with_op = - new (GetGraph()->GetArena()) HDataProcWithShifterOp(use, - other_input, - bitfield_op->InputAt(0), - 
op_kind, - shift_amount, - use->GetDexPc()); + new (GetGraph()->GetAllocator()) HDataProcWithShifterOp(use, + other_input, + bitfield_op->InputAt(0), + op_kind, + shift_amount, + use->GetDexPc()); use->GetBlock()->ReplaceAndRemoveInstructionWith(use, alu_with_op); if (bitfield_op->GetUses().empty()) { bitfield_op->GetBlock()->RemoveInstruction(bitfield_op); diff --git a/compiler/optimizing/instruction_simplifier_mips.cc b/compiler/optimizing/instruction_simplifier_mips.cc index 4bf1bfb9f3..6a0d8a60c4 100644 --- a/compiler/optimizing/instruction_simplifier_mips.cc +++ b/compiler/optimizing/instruction_simplifier_mips.cc @@ -74,7 +74,7 @@ bool InstructionSimplifierMipsVisitor::TryExtractArrayAccessIndex(HInstruction* } HGraph* graph = access->GetBlock()->GetGraph(); - ArenaAllocator* arena = graph->GetArena(); + ArenaAllocator* allocator = graph->GetAllocator(); size_t component_shift = DataType::SizeShift(packed_type); bool is_extracting_beneficial = false; @@ -113,7 +113,7 @@ bool InstructionSimplifierMipsVisitor::TryExtractArrayAccessIndex(HInstruction* HIntConstant* shift = graph->GetIntConstant(component_shift); HIntermediateArrayAddressIndex* address = - new (arena) HIntermediateArrayAddressIndex(index, shift, kNoDexPc); + new (allocator) HIntermediateArrayAddressIndex(index, shift, kNoDexPc); access->GetBlock()->InsertInstructionBefore(address, access); access->ReplaceInput(address, 1); return true; diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc index 73d866fbea..1c13084a48 100644 --- a/compiler/optimizing/instruction_simplifier_shared.cc +++ b/compiler/optimizing/instruction_simplifier_shared.cc @@ -75,8 +75,8 @@ bool TrySimpleMultiplyAccumulatePatterns(HMul* mul, return false; } - ArenaAllocator* arena = mul->GetBlock()->GetGraph()->GetArena(); - HMultiplyAccumulate* mulacc = new(arena) HMultiplyAccumulate( + ArenaAllocator* allocator = mul->GetBlock()->GetGraph()->GetAllocator(); + 
HMultiplyAccumulate* mulacc = new (allocator) HMultiplyAccumulate( mul->GetType(), op_kind, input_a, input_a, input_b, mul->GetDexPc()); mul->GetBlock()->ReplaceAndRemoveInstructionWith(mul, mulacc); @@ -105,7 +105,7 @@ bool TryCombineMultiplyAccumulate(HMul* mul, InstructionSet isa) { return false; } - ArenaAllocator* arena = mul->GetBlock()->GetGraph()->GetArena(); + ArenaAllocator* allocator = mul->GetBlock()->GetGraph()->GetAllocator(); if (mul->HasOnlyOneNonEnvironmentUse()) { HInstruction* use = mul->GetUses().front().GetUser(); @@ -137,11 +137,11 @@ bool TryCombineMultiplyAccumulate(HMul* mul, InstructionSet isa) { if (accumulator != nullptr) { HMultiplyAccumulate* mulacc = - new (arena) HMultiplyAccumulate(type, - binop->GetKind(), - accumulator, - mul->GetLeft(), - mul->GetRight()); + new (allocator) HMultiplyAccumulate(type, + binop->GetKind(), + accumulator, + mul->GetLeft(), + mul->GetRight()); binop->GetBlock()->ReplaceAndRemoveInstructionWith(binop, mulacc); DCHECK(!mul->HasUses()); @@ -150,11 +150,11 @@ bool TryCombineMultiplyAccumulate(HMul* mul, InstructionSet isa) { } } else if (use->IsNeg() && isa != kArm) { HMultiplyAccumulate* mulacc = - new (arena) HMultiplyAccumulate(type, - HInstruction::kSub, - mul->GetBlock()->GetGraph()->GetConstant(type, 0), - mul->GetLeft(), - mul->GetRight()); + new (allocator) HMultiplyAccumulate(type, + HInstruction::kSub, + mul->GetBlock()->GetGraph()->GetConstant(type, 0), + mul->GetLeft(), + mul->GetRight()); use->GetBlock()->ReplaceAndRemoveInstructionWith(use, mulacc); DCHECK(!mul->HasUses()); @@ -216,7 +216,7 @@ bool TryMergeNegatedInput(HBinaryOperation* op) { // BIC dst, src, mask (respectively ORN, EON) HInstruction* src = hnot->AsNot()->GetInput(); - HBitwiseNegatedRight* neg_op = new (hnot->GetBlock()->GetGraph()->GetArena()) + HBitwiseNegatedRight* neg_op = new (hnot->GetBlock()->GetGraph()->GetAllocator()) HBitwiseNegatedRight(op->GetType(), op->GetKind(), hother, src, op->GetDexPc()); 
op->GetBlock()->ReplaceAndRemoveInstructionWith(op, neg_op); @@ -255,10 +255,10 @@ bool TryExtractArrayAccessAddress(HInstruction* access, // Proceed to extract the base address computation. HGraph* graph = access->GetBlock()->GetGraph(); - ArenaAllocator* arena = graph->GetArena(); + ArenaAllocator* allocator = graph->GetAllocator(); HIntConstant* offset = graph->GetIntConstant(data_offset); - HIntermediateAddress* address = new (arena) HIntermediateAddress(array, offset, kNoDexPc); + HIntermediateAddress* address = new (allocator) HIntermediateAddress(array, offset, kNoDexPc); // TODO: Is it ok to not have this on the intermediate address? // address->SetReferenceTypeInfo(array->GetReferenceTypeInfo()); access->GetBlock()->InsertInstructionBefore(address, access); @@ -289,7 +289,7 @@ bool TryExtractVecArrayAccessAddress(HVecMemoryOperation* access, HInstruction* } HGraph* graph = access->GetBlock()->GetGraph(); - ArenaAllocator* arena = graph->GetArena(); + ArenaAllocator* allocator = graph->GetAllocator(); DataType::Type packed_type = access->GetPackedType(); uint32_t data_offset = mirror::Array::DataOffset( DataType::Size(packed_type)).Uint32Value(); @@ -328,7 +328,7 @@ bool TryExtractVecArrayAccessAddress(HVecMemoryOperation* access, HInstruction* HIntConstant* offset = graph->GetIntConstant(data_offset); HIntConstant* shift = graph->GetIntConstant(component_shift); HIntermediateAddressIndex* address = - new (arena) HIntermediateAddressIndex(index, offset, shift, kNoDexPc); + new (allocator) HIntermediateAddressIndex(index, offset, shift, kNoDexPc); access->GetBlock()->InsertInstructionBefore(address, access); access->ReplaceInput(address, 1); diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc index 0f14d2728b..dfae534555 100644 --- a/compiler/optimizing/intrinsics.cc +++ b/compiler/optimizing/intrinsics.cc @@ -220,7 +220,7 @@ void IntrinsicVisitor::ComputeIntegerValueOfLocations(HInvoke* invoke, } // The intrinsic will call if 
it needs to allocate a j.l.Integer. - LocationSummary* locations = new (invoke->GetBlock()->GetGraph()->GetArena()) LocationSummary( + LocationSummary* locations = new (invoke->GetBlock()->GetGraph()->GetAllocator()) LocationSummary( invoke, LocationSummary::kCallOnMainOnly, kIntrinsified); if (!invoke->InputAt(0)->IsConstant()) { locations->SetInAt(0, Location::RequiresRegister()); diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h index 7abfd5b74e..4429e6e5b7 100644 --- a/compiler/optimizing/intrinsics.h +++ b/compiler/optimizing/intrinsics.h @@ -100,7 +100,7 @@ class IntrinsicVisitor : public ValueObject { // We're moving potentially two or more locations to locations that could overlap, so we need // a parallel move resolver. - HParallelMove parallel_move(codegen->GetGraph()->GetArena()); + HParallelMove parallel_move(codegen->GetGraph()->GetAllocator()); for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) { HInstruction* input = invoke->InputAt(i); diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc index 75a1ce7e6f..ee07c4f65c 100644 --- a/compiler/optimizing/intrinsics_arm64.cc +++ b/compiler/optimizing/intrinsics_arm64.cc @@ -70,7 +70,7 @@ MacroAssembler* IntrinsicCodeGeneratorARM64::GetVIXLAssembler() { } ArenaAllocator* IntrinsicCodeGeneratorARM64::GetAllocator() { - return codegen_->GetGraph()->GetArena(); + return codegen_->GetGraph()->GetAllocator(); } #define __ codegen->GetVIXLAssembler()-> @@ -236,18 +236,16 @@ bool IntrinsicLocationsBuilderARM64::TryDispatch(HInvoke* invoke) { #define __ masm-> -static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, 
kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); } -static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresFpuRegister()); } @@ -267,10 +265,10 @@ static void MoveIntToFP(LocationSummary* locations, bool is64bit, MacroAssembler } void IntrinsicLocationsBuilderARM64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) { - CreateFPToIntLocations(arena_, invoke); + CreateFPToIntLocations(allocator_, invoke); } void IntrinsicLocationsBuilderARM64::VisitDoubleLongBitsToDouble(HInvoke* invoke) { - CreateIntToFPLocations(arena_, invoke); + CreateIntToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) { @@ -281,10 +279,10 @@ void IntrinsicCodeGeneratorARM64::VisitDoubleLongBitsToDouble(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitFloatFloatToRawIntBits(HInvoke* invoke) { - CreateFPToIntLocations(arena_, invoke); + CreateFPToIntLocations(allocator_, invoke); } void IntrinsicLocationsBuilderARM64::VisitFloatIntBitsToFloat(HInvoke* invoke) { - CreateIntToFPLocations(arena_, invoke); + CreateIntToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitFloatFloatToRawIntBits(HInvoke* invoke) { @@ -294,10 +292,9 @@ void IntrinsicCodeGeneratorARM64::VisitFloatIntBitsToFloat(HInvoke* invoke) { MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler()); } -static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) 
LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); } @@ -324,7 +321,7 @@ static void GenReverseBytes(LocationSummary* locations, } void IntrinsicLocationsBuilderARM64::VisitIntegerReverseBytes(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitIntegerReverseBytes(HInvoke* invoke) { @@ -332,7 +329,7 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerReverseBytes(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitLongReverseBytes(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitLongReverseBytes(HInvoke* invoke) { @@ -340,17 +337,16 @@ void IntrinsicCodeGeneratorARM64::VisitLongReverseBytes(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitShortReverseBytes(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitShortReverseBytes(HInvoke* invoke) { GenReverseBytes(invoke->GetLocations(), DataType::Type::kInt16, GetVIXLAssembler()); } -static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, 
Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); @@ -368,7 +364,7 @@ static void GenNumberOfLeadingZeros(LocationSummary* locations, } void IntrinsicLocationsBuilderARM64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { @@ -376,7 +372,7 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerNumberOfLeadingZeros(HInvoke* invo } void IntrinsicLocationsBuilderARM64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { @@ -396,7 +392,7 @@ static void GenNumberOfTrailingZeros(LocationSummary* locations, } void IntrinsicLocationsBuilderARM64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) { @@ -404,7 +400,7 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerNumberOfTrailingZeros(HInvoke* inv } void IntrinsicLocationsBuilderARM64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) { @@ -423,7 +419,7 @@ static void GenReverse(LocationSummary* locations, } void IntrinsicLocationsBuilderARM64::VisitIntegerReverse(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitIntegerReverse(HInvoke* invoke) { @@ -431,7 +427,7 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerReverse(HInvoke* invoke) { } void 
IntrinsicLocationsBuilderARM64::VisitLongReverse(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitLongReverse(HInvoke* invoke) { @@ -456,7 +452,7 @@ static void GenBitCount(HInvoke* instr, DataType::Type type, MacroAssembler* mas } void IntrinsicLocationsBuilderARM64::VisitLongBitCount(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitLongBitCount(HInvoke* invoke) { @@ -464,7 +460,7 @@ void IntrinsicCodeGeneratorARM64::VisitLongBitCount(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitIntegerBitCount(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitIntegerBitCount(HInvoke* invoke) { @@ -489,7 +485,7 @@ static void GenHighestOneBit(HInvoke* invoke, DataType::Type type, MacroAssemble } void IntrinsicLocationsBuilderARM64::VisitIntegerHighestOneBit(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitIntegerHighestOneBit(HInvoke* invoke) { @@ -497,7 +493,7 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerHighestOneBit(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitLongHighestOneBit(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitLongHighestOneBit(HInvoke* invoke) { @@ -518,7 +514,7 @@ static void GenLowestOneBit(HInvoke* invoke, DataType::Type type, MacroAssembler } void IntrinsicLocationsBuilderARM64::VisitIntegerLowestOneBit(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitIntegerLowestOneBit(HInvoke* invoke) { @@ -526,17 +522,16 @@ void 
IntrinsicCodeGeneratorARM64::VisitIntegerLowestOneBit(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitLongLowestOneBit(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitLongLowestOneBit(HInvoke* invoke) { GenLowestOneBit(invoke, DataType::Type::kInt64, GetVIXLAssembler()); } -static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); } @@ -552,7 +547,7 @@ static void MathAbsFP(LocationSummary* locations, bool is64bit, MacroAssembler* } void IntrinsicLocationsBuilderARM64::VisitMathAbsDouble(HInvoke* invoke) { - CreateFPToFPLocations(arena_, invoke); + CreateFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathAbsDouble(HInvoke* invoke) { @@ -560,7 +555,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathAbsDouble(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathAbsFloat(HInvoke* invoke) { - CreateFPToFPLocations(arena_, invoke); + CreateFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathAbsFloat(HInvoke* invoke) { @@ -581,7 +576,7 @@ static void GenAbsInteger(LocationSummary* locations, } void IntrinsicLocationsBuilderARM64::VisitMathAbsInt(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathAbsInt(HInvoke* invoke) { @@ -589,7 +584,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathAbsInt(HInvoke* invoke) { } void 
IntrinsicLocationsBuilderARM64::VisitMathAbsLong(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathAbsLong(HInvoke* invoke) { @@ -614,17 +609,16 @@ static void GenMinMaxFP(LocationSummary* locations, } } -static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); } void IntrinsicLocationsBuilderARM64::VisitMathMinDoubleDouble(HInvoke* invoke) { - CreateFPFPToFPLocations(arena_, invoke); + CreateFPFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathMinDoubleDouble(HInvoke* invoke) { @@ -632,7 +626,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathMinDoubleDouble(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathMinFloatFloat(HInvoke* invoke) { - CreateFPFPToFPLocations(arena_, invoke); + CreateFPFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathMinFloatFloat(HInvoke* invoke) { @@ -640,7 +634,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathMinFloatFloat(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathMaxDoubleDouble(HInvoke* invoke) { - CreateFPFPToFPLocations(arena_, invoke); + CreateFPFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathMaxDoubleDouble(HInvoke* invoke) { @@ -648,7 +642,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathMaxDoubleDouble(HInvoke* invoke) { } void 
IntrinsicLocationsBuilderARM64::VisitMathMaxFloatFloat(HInvoke* invoke) { - CreateFPFPToFPLocations(arena_, invoke); + CreateFPFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathMaxFloatFloat(HInvoke* invoke) { @@ -673,7 +667,7 @@ static void GenMinMax(LocationSummary* locations, } void IntrinsicLocationsBuilderARM64::VisitMathMinIntInt(HInvoke* invoke) { - CreateIntIntToIntLocations(arena_, invoke); + CreateIntIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathMinIntInt(HInvoke* invoke) { @@ -681,7 +675,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathMinIntInt(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathMinLongLong(HInvoke* invoke) { - CreateIntIntToIntLocations(arena_, invoke); + CreateIntIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathMinLongLong(HInvoke* invoke) { @@ -689,7 +683,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathMinLongLong(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathMaxIntInt(HInvoke* invoke) { - CreateIntIntToIntLocations(arena_, invoke); + CreateIntIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathMaxIntInt(HInvoke* invoke) { @@ -697,7 +691,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathMaxIntInt(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathMaxLongLong(HInvoke* invoke) { - CreateIntIntToIntLocations(arena_, invoke); + CreateIntIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathMaxLongLong(HInvoke* invoke) { @@ -705,7 +699,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathMaxLongLong(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathSqrt(HInvoke* invoke) { - CreateFPToFPLocations(arena_, invoke); + CreateFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathSqrt(HInvoke* invoke) { @@ -715,7 +709,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathSqrt(HInvoke* invoke) { } 
void IntrinsicLocationsBuilderARM64::VisitMathCeil(HInvoke* invoke) { - CreateFPToFPLocations(arena_, invoke); + CreateFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathCeil(HInvoke* invoke) { @@ -725,7 +719,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathCeil(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathFloor(HInvoke* invoke) { - CreateFPToFPLocations(arena_, invoke); + CreateFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathFloor(HInvoke* invoke) { @@ -735,7 +729,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathFloor(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathRint(HInvoke* invoke) { - CreateFPToFPLocations(arena_, invoke); + CreateFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathRint(HInvoke* invoke) { @@ -744,10 +738,9 @@ void IntrinsicCodeGeneratorARM64::VisitMathRint(HInvoke* invoke) { __ Frintn(DRegisterFrom(locations->Out()), DRegisterFrom(locations->InAt(0))); } -static void CreateFPToIntPlusFPTempLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateFPToIntPlusFPTempLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); locations->AddTemp(Location::RequiresFpuRegister()); @@ -791,7 +784,7 @@ static void GenMathRound(HInvoke* invoke, bool is_double, vixl::aarch64::MacroAs } void IntrinsicLocationsBuilderARM64::VisitMathRoundDouble(HInvoke* invoke) { - CreateFPToIntPlusFPTempLocations(arena_, invoke); + CreateFPToIntPlusFPTempLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathRoundDouble(HInvoke* invoke) { @@ -799,7 +792,7 @@ void 
IntrinsicCodeGeneratorARM64::VisitMathRoundDouble(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathRoundFloat(HInvoke* invoke) { - CreateFPToIntPlusFPTempLocations(arena_, invoke); + CreateFPToIntPlusFPTempLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathRoundFloat(HInvoke* invoke) { @@ -807,7 +800,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathRoundFloat(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMemoryPeekByte(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMemoryPeekByte(HInvoke* invoke) { @@ -817,7 +810,7 @@ void IntrinsicCodeGeneratorARM64::VisitMemoryPeekByte(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMemoryPeekIntNative(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMemoryPeekIntNative(HInvoke* invoke) { @@ -827,7 +820,7 @@ void IntrinsicCodeGeneratorARM64::VisitMemoryPeekIntNative(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMemoryPeekLongNative(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMemoryPeekLongNative(HInvoke* invoke) { @@ -837,7 +830,7 @@ void IntrinsicCodeGeneratorARM64::VisitMemoryPeekLongNative(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMemoryPeekShortNative(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMemoryPeekShortNative(HInvoke* invoke) { @@ -846,16 +839,15 @@ void IntrinsicCodeGeneratorARM64::VisitMemoryPeekShortNative(HInvoke* invoke) { AbsoluteHeapOperandFrom(invoke->GetLocations()->InAt(0), 0)); } -static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations 
= new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); } void IntrinsicLocationsBuilderARM64::VisitMemoryPokeByte(HInvoke* invoke) { - CreateIntIntToVoidLocations(arena_, invoke); + CreateIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMemoryPokeByte(HInvoke* invoke) { @@ -865,7 +857,7 @@ void IntrinsicCodeGeneratorARM64::VisitMemoryPokeByte(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMemoryPokeIntNative(HInvoke* invoke) { - CreateIntIntToVoidLocations(arena_, invoke); + CreateIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMemoryPokeIntNative(HInvoke* invoke) { @@ -875,7 +867,7 @@ void IntrinsicCodeGeneratorARM64::VisitMemoryPokeIntNative(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMemoryPokeLongNative(HInvoke* invoke) { - CreateIntIntToVoidLocations(arena_, invoke); + CreateIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMemoryPokeLongNative(HInvoke* invoke) { @@ -885,7 +877,7 @@ void IntrinsicCodeGeneratorARM64::VisitMemoryPokeLongNative(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMemoryPokeShortNative(HInvoke* invoke) { - CreateIntIntToVoidLocations(arena_, invoke); + CreateIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMemoryPokeShortNative(HInvoke* invoke) { @@ -895,9 +887,8 @@ void IntrinsicCodeGeneratorARM64::VisitMemoryPokeShortNative(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitThreadCurrentThread(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - 
LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetOut(Location::RequiresRegister()); } @@ -949,15 +940,16 @@ static void GenUnsafeGet(HInvoke* invoke, } } -static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { +static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) { bool can_call = kEmitCompilerReadBarrier && (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject || invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile); - LocationSummary* locations = new (arena) LocationSummary(invoke, - (can_call - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall), - kIntrinsified); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, + can_call + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall, + kIntrinsified); if (can_call && kUseBakerReadBarrier) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
// We need a temporary register for the read barrier marking slow @@ -972,22 +964,22 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke } void IntrinsicLocationsBuilderARM64::VisitUnsafeGet(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke); + CreateIntIntIntToIntLocations(allocator_, invoke); } void IntrinsicLocationsBuilderARM64::VisitUnsafeGetVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke); + CreateIntIntIntToIntLocations(allocator_, invoke); } void IntrinsicLocationsBuilderARM64::VisitUnsafeGetLong(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke); + CreateIntIntIntToIntLocations(allocator_, invoke); } void IntrinsicLocationsBuilderARM64::VisitUnsafeGetLongVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke); + CreateIntIntIntToIntLocations(allocator_, invoke); } void IntrinsicLocationsBuilderARM64::VisitUnsafeGetObject(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke); + CreateIntIntIntToIntLocations(allocator_, invoke); } void IntrinsicLocationsBuilderARM64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke); + CreateIntIntIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitUnsafeGet(HInvoke* invoke) { @@ -1009,10 +1001,9 @@ void IntrinsicCodeGeneratorARM64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_); } -static void CreateIntIntIntIntToVoid(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::NoLocation()); // Unused receiver. 
locations->SetInAt(1, Location::RequiresRegister()); locations->SetInAt(2, Location::RequiresRegister()); @@ -1020,31 +1011,31 @@ static void CreateIntIntIntIntToVoid(ArenaAllocator* arena, HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitUnsafePut(HInvoke* invoke) { - CreateIntIntIntIntToVoid(arena_, invoke); + CreateIntIntIntIntToVoid(allocator_, invoke); } void IntrinsicLocationsBuilderARM64::VisitUnsafePutOrdered(HInvoke* invoke) { - CreateIntIntIntIntToVoid(arena_, invoke); + CreateIntIntIntIntToVoid(allocator_, invoke); } void IntrinsicLocationsBuilderARM64::VisitUnsafePutVolatile(HInvoke* invoke) { - CreateIntIntIntIntToVoid(arena_, invoke); + CreateIntIntIntIntToVoid(allocator_, invoke); } void IntrinsicLocationsBuilderARM64::VisitUnsafePutObject(HInvoke* invoke) { - CreateIntIntIntIntToVoid(arena_, invoke); + CreateIntIntIntIntToVoid(allocator_, invoke); } void IntrinsicLocationsBuilderARM64::VisitUnsafePutObjectOrdered(HInvoke* invoke) { - CreateIntIntIntIntToVoid(arena_, invoke); + CreateIntIntIntIntToVoid(allocator_, invoke); } void IntrinsicLocationsBuilderARM64::VisitUnsafePutObjectVolatile(HInvoke* invoke) { - CreateIntIntIntIntToVoid(arena_, invoke); + CreateIntIntIntIntToVoid(allocator_, invoke); } void IntrinsicLocationsBuilderARM64::VisitUnsafePutLong(HInvoke* invoke) { - CreateIntIntIntIntToVoid(arena_, invoke); + CreateIntIntIntIntToVoid(allocator_, invoke); } void IntrinsicLocationsBuilderARM64::VisitUnsafePutLongOrdered(HInvoke* invoke) { - CreateIntIntIntIntToVoid(arena_, invoke); + CreateIntIntIntIntToVoid(allocator_, invoke); } void IntrinsicLocationsBuilderARM64::VisitUnsafePutLongVolatile(HInvoke* invoke) { - CreateIntIntIntIntToVoid(arena_, invoke); + CreateIntIntIntIntToVoid(allocator_, invoke); } static void GenUnsafePut(HInvoke* invoke, @@ -1151,17 +1142,18 @@ void IntrinsicCodeGeneratorARM64::VisitUnsafePutLongVolatile(HInvoke* invoke) { codegen_); } -static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, 
+static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator, HInvoke* invoke, DataType::Type type) { bool can_call = kEmitCompilerReadBarrier && kUseBakerReadBarrier && (invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject); - LocationSummary* locations = new (arena) LocationSummary(invoke, - (can_call - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall), - kIntrinsified); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, + can_call + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall, + kIntrinsified); locations->SetInAt(0, Location::NoLocation()); // Unused receiver. locations->SetInAt(1, Location::RequiresRegister()); locations->SetInAt(2, Location::RequiresRegister()); @@ -1265,10 +1257,10 @@ static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorARM64* cod } void IntrinsicLocationsBuilderARM64::VisitUnsafeCASInt(HInvoke* invoke) { - CreateIntIntIntIntIntToInt(arena_, invoke, DataType::Type::kInt32); + CreateIntIntIntIntIntToInt(allocator_, invoke, DataType::Type::kInt32); } void IntrinsicLocationsBuilderARM64::VisitUnsafeCASLong(HInvoke* invoke) { - CreateIntIntIntIntIntToInt(arena_, invoke, DataType::Type::kInt64); + CreateIntIntIntIntIntToInt(allocator_, invoke, DataType::Type::kInt64); } void IntrinsicLocationsBuilderARM64::VisitUnsafeCASObject(HInvoke* invoke) { // The only read barrier implementation supporting the @@ -1277,7 +1269,7 @@ void IntrinsicLocationsBuilderARM64::VisitUnsafeCASObject(HInvoke* invoke) { return; } - CreateIntIntIntIntIntToInt(arena_, invoke, DataType::Type::kReference); + CreateIntIntIntIntIntToInt(allocator_, invoke, DataType::Type::kReference); } void IntrinsicCodeGeneratorARM64::VisitUnsafeCASInt(HInvoke* invoke) { @@ -1295,11 +1287,12 @@ void IntrinsicCodeGeneratorARM64::VisitUnsafeCASObject(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitStringCompareTo(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, 
- invoke->InputAt(1)->CanBeNull() - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, + invoke->InputAt(1)->CanBeNull() + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall, + kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); locations->AddTemp(Location::RequiresRegister()); @@ -1526,9 +1519,8 @@ static const char* GetConstString(HInstruction* candidate, uint32_t* utf16_lengt } void IntrinsicLocationsBuilderARM64::VisitStringEquals(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); @@ -1754,9 +1746,8 @@ static void GenerateVisitStringIndexOf(HInvoke* invoke, } void IntrinsicLocationsBuilderARM64::VisitStringIndexOf(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainAndSlowPath, - kIntrinsified); + LocationSummary* locations = new (allocator_) LocationSummary( + invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); // We have a hand-crafted assembly stub that follows the runtime calling convention. So it's // best to align the inputs accordingly. 
InvokeRuntimeCallingConvention calling_convention; @@ -1774,9 +1765,8 @@ void IntrinsicCodeGeneratorARM64::VisitStringIndexOf(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitStringIndexOfAfter(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainAndSlowPath, - kIntrinsified); + LocationSummary* locations = new (allocator_) LocationSummary( + invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); // We have a hand-crafted assembly stub that follows the runtime calling convention. So it's // best to align the inputs accordingly. InvokeRuntimeCallingConvention calling_convention; @@ -1792,9 +1782,8 @@ void IntrinsicCodeGeneratorARM64::VisitStringIndexOfAfter(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromBytes(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainAndSlowPath, - kIntrinsified); + LocationSummary* locations = new (allocator_) LocationSummary( + invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1))); @@ -1819,9 +1808,8 @@ void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromBytes(HInvoke* invoke) } void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromChars(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainOnly, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1))); @@ -1841,9 +1829,8 @@ void 
IntrinsicCodeGeneratorARM64::VisitStringNewStringFromChars(HInvoke* invoke) } void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromString(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainAndSlowPath, - kIntrinsified); + LocationSummary* locations = new (allocator_) LocationSummary( + invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference)); @@ -1864,29 +1851,27 @@ void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromString(HInvoke* invoke __ Bind(slow_path->GetExitLabel()); } -static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) { +static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) { DCHECK_EQ(invoke->GetNumberOfArguments(), 1U); DCHECK(DataType::IsFloatingPointType(invoke->InputAt(0)->GetType())); DCHECK(DataType::IsFloatingPointType(invoke->GetType())); - LocationSummary* const locations = new (arena) LocationSummary(invoke, - LocationSummary::kCallOnMainOnly, - kIntrinsified); + LocationSummary* const locations = + new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0))); locations->SetOut(calling_convention.GetReturnLocation(invoke->GetType())); } -static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) { +static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) { DCHECK_EQ(invoke->GetNumberOfArguments(), 2U); DCHECK(DataType::IsFloatingPointType(invoke->InputAt(0)->GetType())); DCHECK(DataType::IsFloatingPointType(invoke->InputAt(1)->GetType())); DCHECK(DataType::IsFloatingPointType(invoke->GetType())); - 
LocationSummary* const locations = new (arena) LocationSummary(invoke, - LocationSummary::kCallOnMainOnly, - kIntrinsified); + LocationSummary* const locations = + new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0))); @@ -1901,7 +1886,7 @@ static void GenFPToFPCall(HInvoke* invoke, } void IntrinsicLocationsBuilderARM64::VisitMathCos(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathCos(HInvoke* invoke) { @@ -1909,7 +1894,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathCos(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathSin(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathSin(HInvoke* invoke) { @@ -1917,7 +1902,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathSin(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathAcos(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathAcos(HInvoke* invoke) { @@ -1925,7 +1910,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathAcos(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathAsin(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathAsin(HInvoke* invoke) { @@ -1933,7 +1918,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathAsin(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathAtan(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathAtan(HInvoke* invoke) { @@ -1941,7 +1926,7 @@ void 
IntrinsicCodeGeneratorARM64::VisitMathAtan(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathCbrt(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathCbrt(HInvoke* invoke) { @@ -1949,7 +1934,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathCbrt(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathCosh(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathCosh(HInvoke* invoke) { @@ -1957,7 +1942,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathCosh(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathExp(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathExp(HInvoke* invoke) { @@ -1965,7 +1950,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathExp(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathExpm1(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathExpm1(HInvoke* invoke) { @@ -1973,7 +1958,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathExpm1(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathLog(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathLog(HInvoke* invoke) { @@ -1981,7 +1966,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathLog(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathLog10(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathLog10(HInvoke* invoke) { @@ -1989,7 +1974,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathLog10(HInvoke* 
invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathSinh(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathSinh(HInvoke* invoke) { @@ -1997,7 +1982,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathSinh(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathTan(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathTan(HInvoke* invoke) { @@ -2005,7 +1990,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathTan(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathTanh(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathTanh(HInvoke* invoke) { @@ -2013,7 +1998,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathTanh(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathAtan2(HInvoke* invoke) { - CreateFPFPToFPCallLocations(arena_, invoke); + CreateFPFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathAtan2(HInvoke* invoke) { @@ -2021,7 +2006,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathAtan2(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathHypot(HInvoke* invoke) { - CreateFPFPToFPCallLocations(arena_, invoke); + CreateFPFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathHypot(HInvoke* invoke) { @@ -2029,7 +2014,7 @@ void IntrinsicCodeGeneratorARM64::VisitMathHypot(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitMathNextAfter(HInvoke* invoke) { - CreateFPFPToFPCallLocations(arena_, invoke); + CreateFPFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitMathNextAfter(HInvoke* invoke) { @@ -2037,9 +2022,8 @@ void IntrinsicCodeGeneratorARM64::VisitMathNextAfter(HInvoke* invoke) { } void 
IntrinsicLocationsBuilderARM64::VisitStringGetCharsNoCheck(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); locations->SetInAt(2, Location::RequiresRegister()); @@ -2189,10 +2173,9 @@ void IntrinsicLocationsBuilderARM64::VisitSystemArrayCopyChar(HInvoke* invoke) { } } - ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena(); - LocationSummary* locations = new (allocator) LocationSummary(invoke, - LocationSummary::kCallOnSlowPath, - kIntrinsified); + ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator(); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified); // arraycopy(char[] src, int src_pos, char[] dst, int dst_pos, int length). locations->SetInAt(0, Location::RequiresRegister()); SetSystemArrayCopyLocationRequires(locations, 1, invoke->InputAt(1)); @@ -2428,10 +2411,9 @@ void IntrinsicLocationsBuilderARM64::VisitSystemArrayCopy(HInvoke* invoke) { return; } - ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena(); - LocationSummary* locations = new (allocator) LocationSummary(invoke, - LocationSummary::kCallOnSlowPath, - kIntrinsified); + ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator(); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified); // arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length). 
locations->SetInAt(0, Location::RequiresRegister()); SetSystemArrayCopyLocationRequires(locations, 1, invoke->InputAt(1)); @@ -2937,7 +2919,7 @@ static void GenIsInfinite(LocationSummary* locations, } void IntrinsicLocationsBuilderARM64::VisitFloatIsInfinite(HInvoke* invoke) { - CreateFPToIntLocations(arena_, invoke); + CreateFPToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitFloatIsInfinite(HInvoke* invoke) { @@ -2945,7 +2927,7 @@ void IntrinsicCodeGeneratorARM64::VisitFloatIsInfinite(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitDoubleIsInfinite(HInvoke* invoke) { - CreateFPToIntLocations(arena_, invoke); + CreateFPToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARM64::VisitDoubleIsInfinite(HInvoke* invoke) { @@ -3026,9 +3008,8 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) { } void IntrinsicLocationsBuilderARM64::VisitThreadInterrupted(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetOut(Location::RequiresRegister()); } diff --git a/compiler/optimizing/intrinsics_arm64.h b/compiler/optimizing/intrinsics_arm64.h index 5a6d180ed6..3533c88c67 100644 --- a/compiler/optimizing/intrinsics_arm64.h +++ b/compiler/optimizing/intrinsics_arm64.h @@ -39,8 +39,8 @@ class CodeGeneratorARM64; class IntrinsicLocationsBuilderARM64 FINAL : public IntrinsicVisitor { public: - explicit IntrinsicLocationsBuilderARM64(ArenaAllocator* arena, CodeGeneratorARM64* codegen) - : arena_(arena), codegen_(codegen) {} + explicit IntrinsicLocationsBuilderARM64(ArenaAllocator* allocator, CodeGeneratorARM64* codegen) + : allocator_(allocator), codegen_(codegen) {} // Define visitor methods. 
@@ -57,7 +57,7 @@ class IntrinsicLocationsBuilderARM64 FINAL : public IntrinsicVisitor { bool TryDispatch(HInvoke* invoke); private: - ArenaAllocator* arena_; + ArenaAllocator* allocator_; CodeGeneratorARM64* codegen_; DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARM64); diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc index 7ce576c307..332306bebf 100644 --- a/compiler/optimizing/intrinsics_arm_vixl.cc +++ b/compiler/optimizing/intrinsics_arm_vixl.cc @@ -65,7 +65,7 @@ ArmVIXLAssembler* IntrinsicCodeGeneratorARMVIXL::GetAssembler() { } ArenaAllocator* IntrinsicCodeGeneratorARMVIXL::GetAllocator() { - return codegen_->GetGraph()->GetArena(); + return codegen_->GetGraph()->GetAllocator(); } // Default slow-path for fallback (calling the managed code to handle the intrinsic) in an @@ -246,7 +246,7 @@ class ReadBarrierSystemArrayCopySlowPathARMVIXL : public SlowPathCodeARMVIXL { }; IntrinsicLocationsBuilderARMVIXL::IntrinsicLocationsBuilderARMVIXL(CodeGeneratorARMVIXL* codegen) - : arena_(codegen->GetGraph()->GetArena()), + : allocator_(codegen->GetGraph()->GetAllocator()), codegen_(codegen), assembler_(codegen->GetAssembler()), features_(codegen->GetInstructionSetFeatures()) {} @@ -260,18 +260,16 @@ bool IntrinsicLocationsBuilderARMVIXL::TryDispatch(HInvoke* invoke) { return res->Intrinsified(); } -static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); } -static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) 
LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresFpuRegister()); } @@ -297,10 +295,10 @@ static void MoveIntToFP(LocationSummary* locations, bool is64bit, ArmVIXLAssembl } void IntrinsicLocationsBuilderARMVIXL::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) { - CreateFPToIntLocations(arena_, invoke); + CreateFPToIntLocations(allocator_, invoke); } void IntrinsicLocationsBuilderARMVIXL::VisitDoubleLongBitsToDouble(HInvoke* invoke) { - CreateIntToFPLocations(arena_, invoke); + CreateIntToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) { @@ -311,10 +309,10 @@ void IntrinsicCodeGeneratorARMVIXL::VisitDoubleLongBitsToDouble(HInvoke* invoke) } void IntrinsicLocationsBuilderARMVIXL::VisitFloatFloatToRawIntBits(HInvoke* invoke) { - CreateFPToIntLocations(arena_, invoke); + CreateFPToIntLocations(allocator_, invoke); } void IntrinsicLocationsBuilderARMVIXL::VisitFloatIntBitsToFloat(HInvoke* invoke) { - CreateIntToFPLocations(arena_, invoke); + CreateIntToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitFloatFloatToRawIntBits(HInvoke* invoke) { @@ -324,26 +322,23 @@ void IntrinsicCodeGeneratorARMVIXL::VisitFloatIntBitsToFloat(HInvoke* invoke) { MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler()); } -static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, 
LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); } -static void CreateLongToLongLocationsWithOverlap(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateLongToLongLocationsWithOverlap(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); } -static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); } @@ -376,7 +371,7 @@ static void GenNumberOfLeadingZeros(HInvoke* invoke, } void IntrinsicLocationsBuilderARMVIXL::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { @@ -384,7 +379,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitIntegerNumberOfLeadingZeros(HInvoke* in } void IntrinsicLocationsBuilderARMVIXL::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { - CreateLongToLongLocationsWithOverlap(arena_, invoke); + CreateLongToLongLocationsWithOverlap(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { @@ -422,7 
+417,7 @@ static void GenNumberOfTrailingZeros(HInvoke* invoke, } void IntrinsicLocationsBuilderARMVIXL::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) { @@ -430,7 +425,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitIntegerNumberOfTrailingZeros(HInvoke* i } void IntrinsicLocationsBuilderARMVIXL::VisitLongNumberOfTrailingZeros(HInvoke* invoke) { - CreateLongToLongLocationsWithOverlap(arena_, invoke); + CreateLongToLongLocationsWithOverlap(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitLongNumberOfTrailingZeros(HInvoke* invoke) { @@ -442,7 +437,7 @@ static void MathAbsFP(HInvoke* invoke, ArmVIXLAssembler* assembler) { } void IntrinsicLocationsBuilderARMVIXL::VisitMathAbsDouble(HInvoke* invoke) { - CreateFPToFPLocations(arena_, invoke); + CreateFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsDouble(HInvoke* invoke) { @@ -450,17 +445,16 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsDouble(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMathAbsFloat(HInvoke* invoke) { - CreateFPToFPLocations(arena_, invoke); + CreateFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsFloat(HInvoke* invoke) { MathAbsFP(invoke, GetAssembler()); } -static void CreateIntToIntPlusTemp(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntToIntPlusTemp(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); @@ -499,7 +493,7 @@ static void 
GenAbsInteger(LocationSummary* locations, } void IntrinsicLocationsBuilderARMVIXL::VisitMathAbsInt(HInvoke* invoke) { - CreateIntToIntPlusTemp(arena_, invoke); + CreateIntToIntPlusTemp(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsInt(HInvoke* invoke) { @@ -508,7 +502,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsInt(HInvoke* invoke) { void IntrinsicLocationsBuilderARMVIXL::VisitMathAbsLong(HInvoke* invoke) { - CreateIntToIntPlusTemp(arena_, invoke); + CreateIntToIntPlusTemp(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsLong(HInvoke* invoke) { @@ -575,17 +569,16 @@ static void GenMinMaxFloat(HInvoke* invoke, bool is_min, CodeGeneratorARMVIXL* c } } -static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetOut(Location::SameAsFirstInput()); } void IntrinsicLocationsBuilderARMVIXL::VisitMathMinFloatFloat(HInvoke* invoke) { - CreateFPFPToFPLocations(arena_, invoke); + CreateFPFPToFPLocations(allocator_, invoke); invoke->GetLocations()->AddTemp(Location::RequiresRegister()); } @@ -594,7 +587,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathMinFloatFloat(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxFloatFloat(HInvoke* invoke) { - CreateFPFPToFPLocations(arena_, invoke); + CreateFPFPToFPLocations(allocator_, invoke); invoke->GetLocations()->AddTemp(Location::RequiresRegister()); } @@ -654,7 +647,7 @@ static void GenMinMaxDouble(HInvoke* invoke, bool is_min, CodeGeneratorARMVIXL* } void 
IntrinsicLocationsBuilderARMVIXL::VisitMathMinDoubleDouble(HInvoke* invoke) { - CreateFPFPToFPLocations(arena_, invoke); + CreateFPFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathMinDoubleDouble(HInvoke* invoke) { @@ -662,7 +655,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathMinDoubleDouble(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxDoubleDouble(HInvoke* invoke) { - CreateFPFPToFPLocations(arena_, invoke); + CreateFPFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxDoubleDouble(HInvoke* invoke) { @@ -708,17 +701,16 @@ static void GenMinMaxLong(HInvoke* invoke, bool is_min, ArmVIXLAssembler* assemb } } -static void CreateLongLongToLongLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateLongLongToLongLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); locations->SetOut(Location::SameAsFirstInput()); } void IntrinsicLocationsBuilderARMVIXL::VisitMathMinLongLong(HInvoke* invoke) { - CreateLongLongToLongLocations(arena_, invoke); + CreateLongLongToLongLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathMinLongLong(HInvoke* invoke) { @@ -726,7 +718,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathMinLongLong(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxLongLong(HInvoke* invoke) { - CreateLongLongToLongLocations(arena_, invoke); + CreateLongLongToLongLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxLongLong(HInvoke* invoke) { @@ -751,17 +743,16 @@ static void GenMinMax(HInvoke* invoke, bool is_min, ArmVIXLAssembler* assembler) } } 
-static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); } void IntrinsicLocationsBuilderARMVIXL::VisitMathMinIntInt(HInvoke* invoke) { - CreateIntIntToIntLocations(arena_, invoke); + CreateIntIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathMinIntInt(HInvoke* invoke) { @@ -769,7 +760,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathMinIntInt(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxIntInt(HInvoke* invoke) { - CreateIntIntToIntLocations(arena_, invoke); + CreateIntIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxIntInt(HInvoke* invoke) { @@ -777,7 +768,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxIntInt(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMathSqrt(HInvoke* invoke) { - CreateFPToFPLocations(arena_, invoke); + CreateFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathSqrt(HInvoke* invoke) { @@ -787,7 +778,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathSqrt(HInvoke* invoke) { void IntrinsicLocationsBuilderARMVIXL::VisitMathRint(HInvoke* invoke) { if (features_.HasARMv8AInstructions()) { - CreateFPToFPLocations(arena_, invoke); + CreateFPToFPLocations(allocator_, invoke); } } @@ -799,9 +790,8 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathRint(HInvoke* invoke) { void IntrinsicLocationsBuilderARMVIXL::VisitMathRoundFloat(HInvoke* invoke) { if (features_.HasARMv8AInstructions()) { 
- LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); locations->AddTemp(Location::RequiresFpuRegister()); @@ -850,7 +840,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathRoundFloat(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekByte(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekByte(HInvoke* invoke) { @@ -860,7 +850,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekByte(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekIntNative(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekIntNative(HInvoke* invoke) { @@ -870,7 +860,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekIntNative(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekLongNative(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekLongNative(HInvoke* invoke) { @@ -891,7 +881,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekLongNative(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekShortNative(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekShortNative(HInvoke* invoke) { @@ -900,16 +890,15 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekShortNative(HInvoke* invoke) __ Ldrsh(OutputRegister(invoke), 
MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0)))); } -static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); } void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeByte(HInvoke* invoke) { - CreateIntIntToVoidLocations(arena_, invoke); + CreateIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeByte(HInvoke* invoke) { @@ -918,7 +907,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeByte(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeIntNative(HInvoke* invoke) { - CreateIntIntToVoidLocations(arena_, invoke); + CreateIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeIntNative(HInvoke* invoke) { @@ -927,7 +916,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeIntNative(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeLongNative(HInvoke* invoke) { - CreateIntIntToVoidLocations(arena_, invoke); + CreateIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeLongNative(HInvoke* invoke) { @@ -941,7 +930,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeLongNative(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeShortNative(HInvoke* invoke) { - CreateIntIntToVoidLocations(arena_, invoke); + CreateIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeShortNative(HInvoke* invoke) { @@ -950,9 +939,8 @@ void 
IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeShortNative(HInvoke* invoke) } void IntrinsicLocationsBuilderARMVIXL::VisitThreadCurrentThread(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetOut(Location::RequiresRegister()); } @@ -1034,17 +1022,18 @@ static void GenUnsafeGet(HInvoke* invoke, } } -static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, +static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke, DataType::Type type) { bool can_call = kEmitCompilerReadBarrier && (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject || invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile); - LocationSummary* locations = new (arena) LocationSummary(invoke, - (can_call - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall), - kIntrinsified); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, + can_call + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall, + kIntrinsified); if (can_call && kUseBakerReadBarrier) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} @@ -1061,22 +1050,22 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGet(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32); + CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32); + CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetLong(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64); + CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetLongVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64); + CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetObject(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference); + CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference); + CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGet(HInvoke* invoke) { @@ -1098,14 +1087,13 @@ void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetObjectVolatile(HInvoke* invoke GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_); } -static void CreateIntIntIntIntToVoid(ArenaAllocator* arena, +static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator, const ArmInstructionSetFeatures& features, DataType::Type type, bool is_volatile, HInvoke* invoke) { - 
LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::NoLocation()); // Unused receiver. locations->SetInAt(1, Location::RequiresRegister()); locations->SetInAt(2, Location::RequiresRegister()); @@ -1126,39 +1114,39 @@ static void CreateIntIntIntIntToVoid(ArenaAllocator* arena, void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePut(HInvoke* invoke) { CreateIntIntIntIntToVoid( - arena_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke); + allocator_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutOrdered(HInvoke* invoke) { CreateIntIntIntIntToVoid( - arena_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke); + allocator_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutVolatile(HInvoke* invoke) { CreateIntIntIntIntToVoid( - arena_, features_, DataType::Type::kInt32, /* is_volatile */ true, invoke); + allocator_, features_, DataType::Type::kInt32, /* is_volatile */ true, invoke); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObject(HInvoke* invoke) { CreateIntIntIntIntToVoid( - arena_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke); + allocator_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectOrdered(HInvoke* invoke) { CreateIntIntIntIntToVoid( - arena_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke); + allocator_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectVolatile(HInvoke* invoke) { CreateIntIntIntIntToVoid( - arena_, features_, 
DataType::Type::kReference, /* is_volatile */ true, invoke); + allocator_, features_, DataType::Type::kReference, /* is_volatile */ true, invoke); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLong(HInvoke* invoke) { CreateIntIntIntIntToVoid( - arena_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke); + allocator_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongOrdered(HInvoke* invoke) { CreateIntIntIntIntToVoid( - arena_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke); + allocator_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke) { CreateIntIntIntIntToVoid( - arena_, features_, DataType::Type::kInt64, /* is_volatile */ true, invoke); + allocator_, features_, DataType::Type::kInt64, /* is_volatile */ true, invoke); } static void GenUnsafePut(LocationSummary* locations, @@ -1284,17 +1272,18 @@ void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke) codegen_); } -static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* arena, +static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* allocator, HInvoke* invoke, DataType::Type type) { bool can_call = kEmitCompilerReadBarrier && kUseBakerReadBarrier && (invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject); - LocationSummary* locations = new (arena) LocationSummary(invoke, - (can_call - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall), - kIntrinsified); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, + can_call + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall, + kIntrinsified); locations->SetInAt(0, Location::NoLocation()); // Unused receiver. 
locations->SetInAt(1, Location::RequiresRegister()); locations->SetInAt(2, Location::RequiresRegister()); @@ -1427,7 +1416,7 @@ static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorARMVIXL* c } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeCASInt(HInvoke* invoke) { - CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke, DataType::Type::kInt32); + CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke, DataType::Type::kInt32); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeCASObject(HInvoke* invoke) { // The only read barrier implementation supporting the @@ -1436,7 +1425,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeCASObject(HInvoke* invoke) { return; } - CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke, DataType::Type::kReference); + CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke, DataType::Type::kReference); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeCASInt(HInvoke* invoke) { GenCas(invoke, DataType::Type::kInt32, codegen_); @@ -1451,11 +1440,12 @@ void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeCASObject(HInvoke* invoke) { void IntrinsicLocationsBuilderARMVIXL::VisitStringCompareTo(HInvoke* invoke) { // The inputs plus one temp. - LocationSummary* locations = new (arena_) LocationSummary(invoke, - invoke->InputAt(1)->CanBeNull() - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, + invoke->InputAt(1)->CanBeNull() + ? 
LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall, + kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); locations->AddTemp(Location::RequiresRegister()); @@ -1733,9 +1723,8 @@ static const char* GetConstString(HInstruction* candidate, uint32_t* utf16_lengt } void IntrinsicLocationsBuilderARMVIXL::VisitStringEquals(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); InvokeRuntimeCallingConventionARMVIXL calling_convention; locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); @@ -1974,9 +1963,8 @@ static void GenerateVisitStringIndexOf(HInvoke* invoke, } void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOf(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainAndSlowPath, - kIntrinsified); + LocationSummary* locations = new (allocator_) LocationSummary( + invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); // We have a hand-crafted assembly stub that follows the runtime calling convention. So it's // best to align the inputs accordingly. InvokeRuntimeCallingConventionARMVIXL calling_convention; @@ -1994,9 +1982,8 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOf(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainAndSlowPath, - kIntrinsified); + LocationSummary* locations = new (allocator_) LocationSummary( + invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); // We have a hand-crafted assembly stub that follows the runtime calling convention. So it's // best to align the inputs accordingly. 
InvokeRuntimeCallingConventionARMVIXL calling_convention; @@ -2012,9 +1999,8 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromBytes(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainAndSlowPath, - kIntrinsified); + LocationSummary* locations = new (allocator_) LocationSummary( + invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); InvokeRuntimeCallingConventionARMVIXL calling_convention; locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1))); @@ -2037,9 +2023,8 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringNewStringFromBytes(HInvoke* invok } void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromChars(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainOnly, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified); InvokeRuntimeCallingConventionARMVIXL calling_convention; locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1))); @@ -2059,9 +2044,8 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringNewStringFromChars(HInvoke* invok } void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromString(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainAndSlowPath, - kIntrinsified); + LocationSummary* locations = new (allocator_) LocationSummary( + invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); InvokeRuntimeCallingConventionARMVIXL calling_convention; locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); locations->SetOut(LocationFrom(r0)); @@ -2571,7 
+2555,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) { __ Bind(intrinsic_slow_path->GetExitLabel()); } -static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) { +static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) { // If the graph is debuggable, all callee-saved floating-point registers are blocked by // the code generator. Furthermore, the register allocator creates fixed live intervals // for all caller-saved registers because we are doing a function call. As a result, if @@ -2585,9 +2569,8 @@ static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) { DCHECK_EQ(invoke->InputAt(0)->GetType(), DataType::Type::kFloat64); DCHECK_EQ(invoke->GetType(), DataType::Type::kFloat64); - LocationSummary* const locations = new (arena) LocationSummary(invoke, - LocationSummary::kCallOnMainOnly, - kIntrinsified); + LocationSummary* const locations = + new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified); const InvokeRuntimeCallingConventionARMVIXL calling_convention; locations->SetInAt(0, Location::RequiresFpuRegister()); @@ -2597,7 +2580,7 @@ static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) { locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1))); } -static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) { +static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) { // If the graph is debuggable, all callee-saved floating-point registers are blocked by // the code generator. Furthermore, the register allocator creates fixed live intervals // for all caller-saved registers because we are doing a function call. 
As a result, if @@ -2612,9 +2595,8 @@ static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) DCHECK_EQ(invoke->InputAt(1)->GetType(), DataType::Type::kFloat64); DCHECK_EQ(invoke->GetType(), DataType::Type::kFloat64); - LocationSummary* const locations = new (arena) LocationSummary(invoke, - LocationSummary::kCallOnMainOnly, - kIntrinsified); + LocationSummary* const locations = + new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified); const InvokeRuntimeCallingConventionARMVIXL calling_convention; locations->SetInAt(0, Location::RequiresFpuRegister()); @@ -2669,7 +2651,7 @@ static void GenFPFPToFPCall(HInvoke* invoke, } void IntrinsicLocationsBuilderARMVIXL::VisitMathCos(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathCos(HInvoke* invoke) { @@ -2677,7 +2659,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathCos(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMathSin(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathSin(HInvoke* invoke) { @@ -2685,7 +2667,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathSin(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMathAcos(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathAcos(HInvoke* invoke) { @@ -2693,7 +2675,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathAcos(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMathAsin(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathAsin(HInvoke* invoke) { @@ -2701,7 +2683,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathAsin(HInvoke* invoke) { 
} void IntrinsicLocationsBuilderARMVIXL::VisitMathAtan(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathAtan(HInvoke* invoke) { @@ -2709,7 +2691,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathAtan(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMathCbrt(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathCbrt(HInvoke* invoke) { @@ -2717,7 +2699,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathCbrt(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMathCosh(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathCosh(HInvoke* invoke) { @@ -2725,7 +2707,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathCosh(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMathExp(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathExp(HInvoke* invoke) { @@ -2733,7 +2715,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathExp(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMathExpm1(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathExpm1(HInvoke* invoke) { @@ -2741,7 +2723,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathExpm1(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMathLog(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathLog(HInvoke* invoke) { @@ -2749,7 +2731,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathLog(HInvoke* invoke) { } void 
IntrinsicLocationsBuilderARMVIXL::VisitMathLog10(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathLog10(HInvoke* invoke) { @@ -2757,7 +2739,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathLog10(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMathSinh(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathSinh(HInvoke* invoke) { @@ -2765,7 +2747,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathSinh(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMathTan(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathTan(HInvoke* invoke) { @@ -2773,7 +2755,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathTan(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMathTanh(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathTanh(HInvoke* invoke) { @@ -2781,7 +2763,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathTanh(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMathAtan2(HInvoke* invoke) { - CreateFPFPToFPCallLocations(arena_, invoke); + CreateFPFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathAtan2(HInvoke* invoke) { @@ -2789,7 +2771,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathAtan2(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitMathHypot(HInvoke* invoke) { - CreateFPFPToFPCallLocations(arena_, invoke); + CreateFPFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathHypot(HInvoke* invoke) { @@ -2797,7 +2779,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathHypot(HInvoke* invoke) { } void 
IntrinsicLocationsBuilderARMVIXL::VisitMathNextAfter(HInvoke* invoke) { - CreateFPFPToFPCallLocations(arena_, invoke); + CreateFPFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitMathNextAfter(HInvoke* invoke) { @@ -2805,7 +2787,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathNextAfter(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitIntegerReverse(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitIntegerReverse(HInvoke* invoke) { @@ -2814,7 +2796,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitIntegerReverse(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitLongReverse(HInvoke* invoke) { - CreateLongToLongLocationsWithOverlap(arena_, invoke); + CreateLongToLongLocationsWithOverlap(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitLongReverse(HInvoke* invoke) { @@ -2831,7 +2813,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitLongReverse(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitIntegerReverseBytes(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitIntegerReverseBytes(HInvoke* invoke) { @@ -2840,7 +2822,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitIntegerReverseBytes(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitLongReverseBytes(HInvoke* invoke) { - CreateLongToLongLocationsWithOverlap(arena_, invoke); + CreateLongToLongLocationsWithOverlap(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitLongReverseBytes(HInvoke* invoke) { @@ -2857,7 +2839,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitLongReverseBytes(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitShortReverseBytes(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void 
IntrinsicCodeGeneratorARMVIXL::VisitShortReverseBytes(HInvoke* invoke) { @@ -2894,7 +2876,7 @@ static void GenBitCount(HInvoke* instr, DataType::Type type, ArmVIXLAssembler* a } void IntrinsicLocationsBuilderARMVIXL::VisitIntegerBitCount(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); invoke->GetLocations()->AddTemp(Location::RequiresFpuRegister()); } @@ -2961,7 +2943,7 @@ static void GenHighestOneBit(HInvoke* invoke, } void IntrinsicLocationsBuilderARMVIXL::VisitIntegerHighestOneBit(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitIntegerHighestOneBit(HInvoke* invoke) { @@ -2969,7 +2951,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitIntegerHighestOneBit(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitLongHighestOneBit(HInvoke* invoke) { - CreateLongToLongLocationsWithOverlap(arena_, invoke); + CreateLongToLongLocationsWithOverlap(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitLongHighestOneBit(HInvoke* invoke) { @@ -3026,7 +3008,7 @@ static void GenLowestOneBit(HInvoke* invoke, } void IntrinsicLocationsBuilderARMVIXL::VisitIntegerLowestOneBit(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitIntegerLowestOneBit(HInvoke* invoke) { @@ -3034,7 +3016,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitIntegerLowestOneBit(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitLongLowestOneBit(HInvoke* invoke) { - CreateLongToLongLocationsWithOverlap(arena_, invoke); + CreateLongToLongLocationsWithOverlap(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitLongLowestOneBit(HInvoke* invoke) { @@ -3042,9 +3024,8 @@ void IntrinsicCodeGeneratorARMVIXL::VisitLongLowestOneBit(HInvoke* invoke) { } void 
IntrinsicLocationsBuilderARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); locations->SetInAt(2, Location::RequiresRegister()); @@ -3170,7 +3151,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke) } void IntrinsicLocationsBuilderARMVIXL::VisitFloatIsInfinite(HInvoke* invoke) { - CreateFPToIntLocations(arena_, invoke); + CreateFPToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitFloatIsInfinite(HInvoke* invoke) { @@ -3188,7 +3169,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitFloatIsInfinite(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitDoubleIsInfinite(HInvoke* invoke) { - CreateFPToIntLocations(arena_, invoke); + CreateFPToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorARMVIXL::VisitDoubleIsInfinite(HInvoke* invoke) { @@ -3215,7 +3196,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitDoubleIsInfinite(HInvoke* invoke) { void IntrinsicLocationsBuilderARMVIXL::VisitMathCeil(HInvoke* invoke) { if (features_.HasARMv8AInstructions()) { - CreateFPToFPLocations(arena_, invoke); + CreateFPToFPLocations(allocator_, invoke); } } @@ -3227,7 +3208,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathCeil(HInvoke* invoke) { void IntrinsicLocationsBuilderARMVIXL::VisitMathFloor(HInvoke* invoke) { if (features_.HasARMv8AInstructions()) { - CreateFPToFPLocations(arena_, invoke); + CreateFPToFPLocations(allocator_, invoke); } } @@ -3309,9 +3290,8 @@ void IntrinsicCodeGeneratorARMVIXL::VisitIntegerValueOf(HInvoke* invoke) { } void IntrinsicLocationsBuilderARMVIXL::VisitThreadInterrupted(HInvoke* invoke) { - LocationSummary* locations = new (arena_) 
LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetOut(Location::RequiresRegister()); } diff --git a/compiler/optimizing/intrinsics_arm_vixl.h b/compiler/optimizing/intrinsics_arm_vixl.h index a4a2830211..4f18ca3fc1 100644 --- a/compiler/optimizing/intrinsics_arm_vixl.h +++ b/compiler/optimizing/intrinsics_arm_vixl.h @@ -46,7 +46,7 @@ class IntrinsicLocationsBuilderARMVIXL FINAL : public IntrinsicVisitor { bool TryDispatch(HInvoke* invoke); private: - ArenaAllocator* arena_; + ArenaAllocator* allocator_; CodeGenerator* codegen_; ArmVIXLAssembler* assembler_; const ArmInstructionSetFeatures& features_; diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc index 8847256532..5f2f71bd4d 100644 --- a/compiler/optimizing/intrinsics_mips.cc +++ b/compiler/optimizing/intrinsics_mips.cc @@ -35,7 +35,7 @@ namespace art { namespace mips { IntrinsicLocationsBuilderMIPS::IntrinsicLocationsBuilderMIPS(CodeGeneratorMIPS* codegen) - : codegen_(codegen), arena_(codegen->GetGraph()->GetArena()) { + : codegen_(codegen), allocator_(codegen->GetGraph()->GetAllocator()) { } MipsAssembler* IntrinsicCodeGeneratorMIPS::GetAssembler() { @@ -43,7 +43,7 @@ MipsAssembler* IntrinsicCodeGeneratorMIPS::GetAssembler() { } ArenaAllocator* IntrinsicCodeGeneratorMIPS::GetAllocator() { - return codegen_->GetGraph()->GetArena(); + return codegen_->GetGraph()->GetAllocator(); } inline bool IntrinsicCodeGeneratorMIPS::IsR2OrNewer() const { @@ -152,10 +152,9 @@ bool IntrinsicLocationsBuilderMIPS::TryDispatch(HInvoke* invoke) { #define __ assembler-> -static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) { + 
LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); } @@ -178,7 +177,7 @@ static void MoveFPToInt(LocationSummary* locations, bool is64bit, MipsAssembler* // long java.lang.Double.doubleToRawLongBits(double) void IntrinsicLocationsBuilderMIPS::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) { - CreateFPToIntLocations(arena_, invoke); + CreateFPToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) { @@ -187,17 +186,16 @@ void IntrinsicCodeGeneratorMIPS::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) // int java.lang.Float.floatToRawIntBits(float) void IntrinsicLocationsBuilderMIPS::VisitFloatFloatToRawIntBits(HInvoke* invoke) { - CreateFPToIntLocations(arena_, invoke); + CreateFPToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitFloatFloatToRawIntBits(HInvoke* invoke) { MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler()); } -static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresFpuRegister()); } @@ -220,7 +218,7 @@ static void MoveIntToFP(LocationSummary* locations, bool is64bit, MipsAssembler* // double java.lang.Double.longBitsToDouble(long) void IntrinsicLocationsBuilderMIPS::VisitDoubleLongBitsToDouble(HInvoke* invoke) { - CreateIntToFPLocations(arena_, invoke); + CreateIntToFPLocations(allocator_, invoke); } void 
IntrinsicCodeGeneratorMIPS::VisitDoubleLongBitsToDouble(HInvoke* invoke) { @@ -229,19 +227,18 @@ void IntrinsicCodeGeneratorMIPS::VisitDoubleLongBitsToDouble(HInvoke* invoke) { // float java.lang.Float.intBitsToFloat(int) void IntrinsicLocationsBuilderMIPS::VisitFloatIntBitsToFloat(HInvoke* invoke) { - CreateIntToFPLocations(arena_, invoke); + CreateIntToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitFloatIntBitsToFloat(HInvoke* invoke) { MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler()); } -static void CreateIntToIntLocations(ArenaAllocator* arena, +static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke, Location::OutputOverlap overlaps = Location::kNoOutputOverlap) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), overlaps); } @@ -402,7 +399,7 @@ static void GenReverse(LocationSummary* locations, // int java.lang.Integer.reverseBytes(int) void IntrinsicLocationsBuilderMIPS::VisitIntegerReverseBytes(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitIntegerReverseBytes(HInvoke* invoke) { @@ -416,7 +413,7 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerReverseBytes(HInvoke* invoke) { // long java.lang.Long.reverseBytes(long) void IntrinsicLocationsBuilderMIPS::VisitLongReverseBytes(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitLongReverseBytes(HInvoke* invoke) { @@ -430,7 +427,7 @@ void IntrinsicCodeGeneratorMIPS::VisitLongReverseBytes(HInvoke* invoke) { // short java.lang.Short.reverseBytes(short) void 
IntrinsicLocationsBuilderMIPS::VisitShortReverseBytes(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitShortReverseBytes(HInvoke* invoke) { @@ -474,7 +471,7 @@ static void GenNumberOfLeadingZeroes(LocationSummary* locations, // int java.lang.Integer.numberOfLeadingZeros(int i) void IntrinsicLocationsBuilderMIPS::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { @@ -483,7 +480,7 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfLeadingZeros(HInvoke* invok // int java.lang.Long.numberOfLeadingZeros(long i) void IntrinsicLocationsBuilderMIPS::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { @@ -561,7 +558,7 @@ static void GenNumberOfTrailingZeroes(LocationSummary* locations, // int java.lang.Integer.numberOfTrailingZeros(int i) void IntrinsicLocationsBuilderMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke, Location::kOutputOverlap); + CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap); } void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) { @@ -570,7 +567,7 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* invo // int java.lang.Long.numberOfTrailingZeros(long i) void IntrinsicLocationsBuilderMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke, Location::kOutputOverlap); + CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap); } void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invoke) { @@ -579,7 +576,7 @@ 
void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invoke) // int java.lang.Integer.reverse(int) void IntrinsicLocationsBuilderMIPS::VisitIntegerReverse(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitIntegerReverse(HInvoke* invoke) { @@ -593,7 +590,7 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerReverse(HInvoke* invoke) { // long java.lang.Long.reverse(long) void IntrinsicLocationsBuilderMIPS::VisitLongReverse(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitLongReverse(HInvoke* invoke) { @@ -605,10 +602,9 @@ void IntrinsicCodeGeneratorMIPS::VisitLongReverse(HInvoke* invoke) { GetAssembler()); } -static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); } @@ -725,7 +721,7 @@ static void GenBitCount(LocationSummary* locations, // int java.lang.Integer.bitCount(int) void IntrinsicLocationsBuilderMIPS::VisitIntegerBitCount(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitIntegerBitCount(HInvoke* invoke) { @@ -734,9 +730,8 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerBitCount(HInvoke* invoke) { // int java.lang.Long.bitCount(int) void IntrinsicLocationsBuilderMIPS::VisitLongBitCount(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - 
LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister()); locations->AddTemp(Location::RequiresRegister()); @@ -801,7 +796,7 @@ static void MathAbsFP(LocationSummary* locations, // double java.lang.Math.abs(double) void IntrinsicLocationsBuilderMIPS::VisitMathAbsDouble(HInvoke* invoke) { - CreateFPToFPLocations(arena_, invoke); + CreateFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathAbsDouble(HInvoke* invoke) { @@ -810,7 +805,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathAbsDouble(HInvoke* invoke) { // float java.lang.Math.abs(float) void IntrinsicLocationsBuilderMIPS::VisitMathAbsFloat(HInvoke* invoke) { - CreateFPToFPLocations(arena_, invoke); + CreateFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathAbsFloat(HInvoke* invoke) { @@ -847,7 +842,7 @@ static void GenAbsInteger(LocationSummary* locations, bool is64bit, MipsAssemble // int java.lang.Math.abs(int) void IntrinsicLocationsBuilderMIPS::VisitMathAbsInt(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathAbsInt(HInvoke* invoke) { @@ -856,7 +851,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathAbsInt(HInvoke* invoke) { // long java.lang.Math.abs(long) void IntrinsicLocationsBuilderMIPS::VisitMathAbsLong(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathAbsLong(HInvoke* invoke) { @@ -1026,10 +1021,9 @@ static void GenMinMaxFP(LocationSummary* locations, } } -static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); 
+static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kOutputOverlap); @@ -1037,7 +1031,7 @@ static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { // double java.lang.Math.min(double, double) void IntrinsicLocationsBuilderMIPS::VisitMathMinDoubleDouble(HInvoke* invoke) { - CreateFPFPToFPLocations(arena_, invoke); + CreateFPFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathMinDoubleDouble(HInvoke* invoke) { @@ -1050,7 +1044,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathMinDoubleDouble(HInvoke* invoke) { // float java.lang.Math.min(float, float) void IntrinsicLocationsBuilderMIPS::VisitMathMinFloatFloat(HInvoke* invoke) { - CreateFPFPToFPLocations(arena_, invoke); + CreateFPFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathMinFloatFloat(HInvoke* invoke) { @@ -1063,7 +1057,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathMinFloatFloat(HInvoke* invoke) { // double java.lang.Math.max(double, double) void IntrinsicLocationsBuilderMIPS::VisitMathMaxDoubleDouble(HInvoke* invoke) { - CreateFPFPToFPLocations(arena_, invoke); + CreateFPFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathMaxDoubleDouble(HInvoke* invoke) { @@ -1076,7 +1070,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathMaxDoubleDouble(HInvoke* invoke) { // float java.lang.Math.max(float, float) void IntrinsicLocationsBuilderMIPS::VisitMathMaxFloatFloat(HInvoke* invoke) { - CreateFPFPToFPLocations(arena_, invoke); + CreateFPFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathMaxFloatFloat(HInvoke* invoke) { @@ -1087,10 +1081,9 @@ void 
IntrinsicCodeGeneratorMIPS::VisitMathMaxFloatFloat(HInvoke* invoke) { GetAssembler()); } -static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); @@ -1267,7 +1260,7 @@ static void GenMinMax(LocationSummary* locations, // int java.lang.Math.min(int, int) void IntrinsicLocationsBuilderMIPS::VisitMathMinIntInt(HInvoke* invoke) { - CreateIntIntToIntLocations(arena_, invoke); + CreateIntIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathMinIntInt(HInvoke* invoke) { @@ -1280,7 +1273,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathMinIntInt(HInvoke* invoke) { // long java.lang.Math.min(long, long) void IntrinsicLocationsBuilderMIPS::VisitMathMinLongLong(HInvoke* invoke) { - CreateIntIntToIntLocations(arena_, invoke); + CreateIntIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathMinLongLong(HInvoke* invoke) { @@ -1293,7 +1286,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathMinLongLong(HInvoke* invoke) { // int java.lang.Math.max(int, int) void IntrinsicLocationsBuilderMIPS::VisitMathMaxIntInt(HInvoke* invoke) { - CreateIntIntToIntLocations(arena_, invoke); + CreateIntIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathMaxIntInt(HInvoke* invoke) { @@ -1306,7 +1299,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathMaxIntInt(HInvoke* invoke) { // long java.lang.Math.max(long, long) void IntrinsicLocationsBuilderMIPS::VisitMathMaxLongLong(HInvoke* invoke) { - 
CreateIntIntToIntLocations(arena_, invoke); + CreateIntIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathMaxLongLong(HInvoke* invoke) { @@ -1319,7 +1312,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathMaxLongLong(HInvoke* invoke) { // double java.lang.Math.sqrt(double) void IntrinsicLocationsBuilderMIPS::VisitMathSqrt(HInvoke* invoke) { - CreateFPToFPLocations(arena_, invoke); + CreateFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathSqrt(HInvoke* invoke) { @@ -1333,7 +1326,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathSqrt(HInvoke* invoke) { // byte libcore.io.Memory.peekByte(long address) void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekByte(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekByte(HInvoke* invoke) { @@ -1346,7 +1339,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekByte(HInvoke* invoke) { // short libcore.io.Memory.peekShort(long address) void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekShortNative(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekShortNative(HInvoke* invoke) { @@ -1378,7 +1371,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekShortNative(HInvoke* invoke) { // int libcore.io.Memory.peekInt(long address) void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekIntNative(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke, Location::kOutputOverlap); + CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap); } void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekIntNative(HInvoke* invoke) { @@ -1396,7 +1389,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekIntNative(HInvoke* invoke) { // long libcore.io.Memory.peekLong(long address) void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekLongNative(HInvoke* invoke) { - 
CreateIntToIntLocations(arena_, invoke, Location::kOutputOverlap); + CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap); } void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekLongNative(HInvoke* invoke) { @@ -1416,17 +1409,16 @@ void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekLongNative(HInvoke* invoke) { } } -static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); } // void libcore.io.Memory.pokeByte(long address, byte value) void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeByte(HInvoke* invoke) { - CreateIntIntToVoidLocations(arena_, invoke); + CreateIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeByte(HInvoke* invoke) { @@ -1439,7 +1431,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeByte(HInvoke* invoke) { // void libcore.io.Memory.pokeShort(long address, short value) void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeShortNative(HInvoke* invoke) { - CreateIntIntToVoidLocations(arena_, invoke); + CreateIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeShortNative(HInvoke* invoke) { @@ -1461,7 +1453,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeShortNative(HInvoke* invoke) { // void libcore.io.Memory.pokeInt(long address, int value) void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeIntNative(HInvoke* invoke) { - CreateIntIntToVoidLocations(arena_, invoke); + CreateIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeIntNative(HInvoke* invoke) { @@ -1479,7 +1471,7 @@ 
void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeIntNative(HInvoke* invoke) { // void libcore.io.Memory.pokeLong(long address, long value) void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeLongNative(HInvoke* invoke) { - CreateIntIntToVoidLocations(arena_, invoke); + CreateIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeLongNative(HInvoke* invoke) { @@ -1501,9 +1493,8 @@ void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeLongNative(HInvoke* invoke) { // Thread java.lang.Thread.currentThread() void IntrinsicLocationsBuilderMIPS::VisitThreadCurrentThread(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetOut(Location::RequiresRegister()); } @@ -1517,17 +1508,18 @@ void IntrinsicCodeGeneratorMIPS::VisitThreadCurrentThread(HInvoke* invoke) { Thread::PeerOffset<kMipsPointerSize>().Int32Value()); } -static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, +static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke, DataType::Type type) { bool can_call = kEmitCompilerReadBarrier && (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject || invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile); - LocationSummary* locations = new (arena) LocationSummary(invoke, - (can_call - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall), - kIntrinsified); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, + can_call + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall, + kIntrinsified); if (can_call && kUseBakerReadBarrier) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} @@ -1657,7 +1649,7 @@ static void GenUnsafeGet(HInvoke* invoke, // int sun.misc.Unsafe.getInt(Object o, long offset) void IntrinsicLocationsBuilderMIPS::VisitUnsafeGet(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32); + CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32); } void IntrinsicCodeGeneratorMIPS::VisitUnsafeGet(HInvoke* invoke) { @@ -1666,7 +1658,7 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafeGet(HInvoke* invoke) { // int sun.misc.Unsafe.getIntVolatile(Object o, long offset) void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32); + CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32); } void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) { @@ -1675,7 +1667,7 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) { // long sun.misc.Unsafe.getLong(Object o, long offset) void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetLong(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64); + CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64); } void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetLong(HInvoke* invoke) { @@ -1684,7 +1676,7 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetLong(HInvoke* invoke) { // Object sun.misc.Unsafe.getObject(Object o, long offset) void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObject(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference); + CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference); } void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObject(HInvoke* invoke) { @@ -1693,17 +1685,16 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObject(HInvoke* invoke) { // Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset) void 
IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference); + CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference); } void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, IsR6(), codegen_); } -static void CreateIntIntIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntIntIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::NoLocation()); // Unused receiver. locations->SetInAt(1, Location::RequiresRegister()); locations->SetInAt(2, Location::RequiresRegister()); @@ -1774,7 +1765,7 @@ static void GenUnsafePut(LocationSummary* locations, // void sun.misc.Unsafe.putInt(Object o, long offset, int x) void IntrinsicLocationsBuilderMIPS::VisitUnsafePut(HInvoke* invoke) { - CreateIntIntIntIntToVoidLocations(arena_, invoke); + CreateIntIntIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitUnsafePut(HInvoke* invoke) { @@ -1788,7 +1779,7 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafePut(HInvoke* invoke) { // void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x) void IntrinsicLocationsBuilderMIPS::VisitUnsafePutOrdered(HInvoke* invoke) { - CreateIntIntIntIntToVoidLocations(arena_, invoke); + CreateIntIntIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitUnsafePutOrdered(HInvoke* invoke) { @@ -1802,7 +1793,7 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafePutOrdered(HInvoke* invoke) { // void sun.misc.Unsafe.putIntVolatile(Object o, long offset, int x) void 
IntrinsicLocationsBuilderMIPS::VisitUnsafePutVolatile(HInvoke* invoke) { - CreateIntIntIntIntToVoidLocations(arena_, invoke); + CreateIntIntIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitUnsafePutVolatile(HInvoke* invoke) { @@ -1816,7 +1807,7 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafePutVolatile(HInvoke* invoke) { // void sun.misc.Unsafe.putObject(Object o, long offset, Object x) void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObject(HInvoke* invoke) { - CreateIntIntIntIntToVoidLocations(arena_, invoke); + CreateIntIntIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObject(HInvoke* invoke) { @@ -1830,7 +1821,7 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObject(HInvoke* invoke) { // void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x) void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) { - CreateIntIntIntIntToVoidLocations(arena_, invoke); + CreateIntIntIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) { @@ -1844,7 +1835,7 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) { // void sun.misc.Unsafe.putObjectVolatile(Object o, long offset, Object x) void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke) { - CreateIntIntIntIntToVoidLocations(arena_, invoke); + CreateIntIntIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke) { @@ -1858,7 +1849,7 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke) { // void sun.misc.Unsafe.putLong(Object o, long offset, long x) void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLong(HInvoke* invoke) { - CreateIntIntIntIntToVoidLocations(arena_, invoke); + CreateIntIntIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLong(HInvoke* 
invoke) { @@ -1872,7 +1863,7 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLong(HInvoke* invoke) { // void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x) void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) { - CreateIntIntIntIntToVoidLocations(arena_, invoke); + CreateIntIntIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) { @@ -1884,15 +1875,16 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) { codegen_); } -static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* arena, HInvoke* invoke) { +static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* allocator, HInvoke* invoke) { bool can_call = kEmitCompilerReadBarrier && kUseBakerReadBarrier && (invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject); - LocationSummary* locations = new (arena) LocationSummary(invoke, - (can_call - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall), - kIntrinsified); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, + can_call + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall, + kIntrinsified); locations->SetInAt(0, Location::NoLocation()); // Unused receiver. 
locations->SetInAt(1, Location::RequiresRegister()); locations->SetInAt(2, Location::RequiresRegister()); @@ -2016,7 +2008,7 @@ static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorMIPS* code // boolean sun.misc.Unsafe.compareAndSwapInt(Object o, long offset, int expected, int x) void IntrinsicLocationsBuilderMIPS::VisitUnsafeCASInt(HInvoke* invoke) { - CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke); + CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitUnsafeCASInt(HInvoke* invoke) { @@ -2031,7 +2023,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafeCASObject(HInvoke* invoke) { return; } - CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke); + CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitUnsafeCASObject(HInvoke* invoke) { @@ -2044,9 +2036,8 @@ void IntrinsicCodeGeneratorMIPS::VisitUnsafeCASObject(HInvoke* invoke) { // int java.lang.String.compareTo(String anotherString) void IntrinsicLocationsBuilderMIPS::VisitStringCompareTo(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainAndSlowPath, - kIntrinsified); + LocationSummary* locations = new (allocator_) LocationSummary( + invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); @@ -2071,9 +2062,8 @@ void IntrinsicCodeGeneratorMIPS::VisitStringCompareTo(HInvoke* invoke) { // boolean java.lang.String.equals(Object anObject) void IntrinsicLocationsBuilderMIPS::VisitStringEquals(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, 
LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister()); @@ -2248,9 +2238,8 @@ static void GenerateStringIndexOf(HInvoke* invoke, // int java.lang.String.indexOf(int ch) void IntrinsicLocationsBuilderMIPS::VisitStringIndexOf(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainAndSlowPath, - kIntrinsified); + LocationSummary* locations = new (allocator_) LocationSummary( + invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); // We have a hand-crafted assembly stub that follows the runtime // calling convention. So it's best to align the inputs accordingly. InvokeRuntimeCallingConvention calling_convention; @@ -2273,9 +2262,8 @@ void IntrinsicCodeGeneratorMIPS::VisitStringIndexOf(HInvoke* invoke) { // int java.lang.String.indexOf(int ch, int fromIndex) void IntrinsicLocationsBuilderMIPS::VisitStringIndexOfAfter(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainAndSlowPath, - kIntrinsified); + LocationSummary* locations = new (allocator_) LocationSummary( + invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); // We have a hand-crafted assembly stub that follows the runtime // calling convention. So it's best to align the inputs accordingly. 
InvokeRuntimeCallingConvention calling_convention; @@ -2299,9 +2287,8 @@ void IntrinsicCodeGeneratorMIPS::VisitStringIndexOfAfter(HInvoke* invoke) { // java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount) void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromBytes(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainAndSlowPath, - kIntrinsified); + LocationSummary* locations = new (allocator_) LocationSummary( + invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); @@ -2325,9 +2312,8 @@ void IntrinsicCodeGeneratorMIPS::VisitStringNewStringFromBytes(HInvoke* invoke) // java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data) void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromChars(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainOnly, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); @@ -2348,9 +2334,8 @@ void IntrinsicCodeGeneratorMIPS::VisitStringNewStringFromChars(HInvoke* invoke) // java.lang.StringFactory.newStringFromString(String toCopy) void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromString(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainAndSlowPath, - kIntrinsified); + LocationSummary* locations = new (allocator_) 
LocationSummary( + invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32); @@ -2411,7 +2396,7 @@ static void GenIsInfinite(LocationSummary* locations, // boolean java.lang.Float.isInfinite(float) void IntrinsicLocationsBuilderMIPS::VisitFloatIsInfinite(HInvoke* invoke) { - CreateFPToIntLocations(arena_, invoke); + CreateFPToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitFloatIsInfinite(HInvoke* invoke) { @@ -2420,7 +2405,7 @@ void IntrinsicCodeGeneratorMIPS::VisitFloatIsInfinite(HInvoke* invoke) { // boolean java.lang.Double.isInfinite(double) void IntrinsicLocationsBuilderMIPS::VisitDoubleIsInfinite(HInvoke* invoke) { - CreateFPToIntLocations(arena_, invoke); + CreateFPToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitDoubleIsInfinite(HInvoke* invoke) { @@ -2476,7 +2461,7 @@ static void GenHighestOneBit(LocationSummary* locations, // int java.lang.Integer.highestOneBit(int) void IntrinsicLocationsBuilderMIPS::VisitIntegerHighestOneBit(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitIntegerHighestOneBit(HInvoke* invoke) { @@ -2485,7 +2470,7 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerHighestOneBit(HInvoke* invoke) { // long java.lang.Long.highestOneBit(long) void IntrinsicLocationsBuilderMIPS::VisitLongHighestOneBit(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke, Location::kOutputOverlap); + CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap); } void IntrinsicCodeGeneratorMIPS::VisitLongHighestOneBit(HInvoke* invoke) { @@ -2524,7 +2509,7 @@ static void GenLowestOneBit(LocationSummary* locations, // int java.lang.Integer.lowestOneBit(int) void 
IntrinsicLocationsBuilderMIPS::VisitIntegerLowestOneBit(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitIntegerLowestOneBit(HInvoke* invoke) { @@ -2533,7 +2518,7 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerLowestOneBit(HInvoke* invoke) { // long java.lang.Long.lowestOneBit(long) void IntrinsicLocationsBuilderMIPS::VisitLongLowestOneBit(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitLongLowestOneBit(HInvoke* invoke) { @@ -2542,9 +2527,8 @@ void IntrinsicCodeGeneratorMIPS::VisitLongLowestOneBit(HInvoke* invoke) { // int java.lang.Math.round(float) void IntrinsicLocationsBuilderMIPS::VisitMathRoundFloat(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->AddTemp(Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); @@ -2667,9 +2651,8 @@ void IntrinsicCodeGeneratorMIPS::VisitMathRoundFloat(HInvoke* invoke) { // void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin) void IntrinsicLocationsBuilderMIPS::VisitStringGetCharsNoCheck(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); locations->SetInAt(2, Location::RequiresRegister()); @@ -2757,20 +2740,18 @@ void IntrinsicCodeGeneratorMIPS::VisitStringGetCharsNoCheck(HInvoke* invoke) { __ Bind(&done); } 
-static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kCallOnMainOnly, - kIntrinsified); +static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0))); locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kFloat64)); } -static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kCallOnMainOnly, - kIntrinsified); +static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0))); @@ -2804,7 +2785,7 @@ static void GenFPFPToFPCall(HInvoke* invoke, // static double java.lang.Math.cos(double a) void IntrinsicLocationsBuilderMIPS::VisitMathCos(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathCos(HInvoke* invoke) { @@ -2813,7 +2794,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathCos(HInvoke* invoke) { // static double java.lang.Math.sin(double a) void IntrinsicLocationsBuilderMIPS::VisitMathSin(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathSin(HInvoke* invoke) { @@ -2822,7 +2803,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathSin(HInvoke* invoke) { // static 
double java.lang.Math.acos(double a) void IntrinsicLocationsBuilderMIPS::VisitMathAcos(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathAcos(HInvoke* invoke) { @@ -2831,7 +2812,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathAcos(HInvoke* invoke) { // static double java.lang.Math.asin(double a) void IntrinsicLocationsBuilderMIPS::VisitMathAsin(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathAsin(HInvoke* invoke) { @@ -2840,7 +2821,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathAsin(HInvoke* invoke) { // static double java.lang.Math.atan(double a) void IntrinsicLocationsBuilderMIPS::VisitMathAtan(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathAtan(HInvoke* invoke) { @@ -2849,7 +2830,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathAtan(HInvoke* invoke) { // static double java.lang.Math.atan2(double y, double x) void IntrinsicLocationsBuilderMIPS::VisitMathAtan2(HInvoke* invoke) { - CreateFPFPToFPCallLocations(arena_, invoke); + CreateFPFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathAtan2(HInvoke* invoke) { @@ -2858,7 +2839,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathAtan2(HInvoke* invoke) { // static double java.lang.Math.cbrt(double a) void IntrinsicLocationsBuilderMIPS::VisitMathCbrt(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathCbrt(HInvoke* invoke) { @@ -2867,7 +2848,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathCbrt(HInvoke* invoke) { // static double java.lang.Math.cosh(double x) void IntrinsicLocationsBuilderMIPS::VisitMathCosh(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); 
+ CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathCosh(HInvoke* invoke) { @@ -2876,7 +2857,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathCosh(HInvoke* invoke) { // static double java.lang.Math.exp(double a) void IntrinsicLocationsBuilderMIPS::VisitMathExp(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathExp(HInvoke* invoke) { @@ -2885,7 +2866,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathExp(HInvoke* invoke) { // static double java.lang.Math.expm1(double x) void IntrinsicLocationsBuilderMIPS::VisitMathExpm1(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathExpm1(HInvoke* invoke) { @@ -2894,7 +2875,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathExpm1(HInvoke* invoke) { // static double java.lang.Math.hypot(double x, double y) void IntrinsicLocationsBuilderMIPS::VisitMathHypot(HInvoke* invoke) { - CreateFPFPToFPCallLocations(arena_, invoke); + CreateFPFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathHypot(HInvoke* invoke) { @@ -2903,7 +2884,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathHypot(HInvoke* invoke) { // static double java.lang.Math.log(double a) void IntrinsicLocationsBuilderMIPS::VisitMathLog(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathLog(HInvoke* invoke) { @@ -2912,7 +2893,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathLog(HInvoke* invoke) { // static double java.lang.Math.log10(double x) void IntrinsicLocationsBuilderMIPS::VisitMathLog10(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathLog10(HInvoke* invoke) { @@ -2921,7 +2902,7 @@ void 
IntrinsicCodeGeneratorMIPS::VisitMathLog10(HInvoke* invoke) { // static double java.lang.Math.nextAfter(double start, double direction) void IntrinsicLocationsBuilderMIPS::VisitMathNextAfter(HInvoke* invoke) { - CreateFPFPToFPCallLocations(arena_, invoke); + CreateFPFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathNextAfter(HInvoke* invoke) { @@ -2930,7 +2911,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathNextAfter(HInvoke* invoke) { // static double java.lang.Math.sinh(double x) void IntrinsicLocationsBuilderMIPS::VisitMathSinh(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathSinh(HInvoke* invoke) { @@ -2939,7 +2920,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathSinh(HInvoke* invoke) { // static double java.lang.Math.tan(double a) void IntrinsicLocationsBuilderMIPS::VisitMathTan(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathTan(HInvoke* invoke) { @@ -2948,7 +2929,7 @@ void IntrinsicCodeGeneratorMIPS::VisitMathTan(HInvoke* invoke) { // static double java.lang.Math.tanh(double x) void IntrinsicLocationsBuilderMIPS::VisitMathTanh(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS::VisitMathTanh(HInvoke* invoke) { @@ -2982,7 +2963,7 @@ void IntrinsicLocationsBuilderMIPS::VisitSystemArrayCopyChar(HInvoke* invoke) { // Okay, it is safe to generate inline code. LocationSummary* locations = - new (arena_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified); + new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified); // arraycopy(Object src, int srcPos, Object dest, int destPos, int length). 
locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1))); diff --git a/compiler/optimizing/intrinsics_mips.h b/compiler/optimizing/intrinsics_mips.h index 05d1aa284a..afd9548a60 100644 --- a/compiler/optimizing/intrinsics_mips.h +++ b/compiler/optimizing/intrinsics_mips.h @@ -50,7 +50,7 @@ class IntrinsicLocationsBuilderMIPS FINAL : public IntrinsicVisitor { private: CodeGeneratorMIPS* codegen_; - ArenaAllocator* arena_; + ArenaAllocator* allocator_; DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS); }; diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc index d0234d8271..8d5be80202 100644 --- a/compiler/optimizing/intrinsics_mips64.cc +++ b/compiler/optimizing/intrinsics_mips64.cc @@ -35,7 +35,7 @@ namespace art { namespace mips64 { IntrinsicLocationsBuilderMIPS64::IntrinsicLocationsBuilderMIPS64(CodeGeneratorMIPS64* codegen) - : codegen_(codegen), arena_(codegen->GetGraph()->GetArena()) { + : codegen_(codegen), allocator_(codegen->GetGraph()->GetAllocator()) { } Mips64Assembler* IntrinsicCodeGeneratorMIPS64::GetAssembler() { @@ -43,7 +43,7 @@ Mips64Assembler* IntrinsicCodeGeneratorMIPS64::GetAssembler() { } ArenaAllocator* IntrinsicCodeGeneratorMIPS64::GetAllocator() { - return codegen_->GetGraph()->GetArena(); + return codegen_->GetGraph()->GetAllocator(); } #define __ codegen->GetAssembler()-> @@ -141,10 +141,9 @@ bool IntrinsicLocationsBuilderMIPS64::TryDispatch(HInvoke* invoke) { #define __ assembler-> -static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); 
locations->SetOut(Location::RequiresRegister()); } @@ -162,7 +161,7 @@ static void MoveFPToInt(LocationSummary* locations, bool is64bit, Mips64Assemble // long java.lang.Double.doubleToRawLongBits(double) void IntrinsicLocationsBuilderMIPS64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) { - CreateFPToIntLocations(arena_, invoke); + CreateFPToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) { @@ -171,17 +170,16 @@ void IntrinsicCodeGeneratorMIPS64::VisitDoubleDoubleToRawLongBits(HInvoke* invok // int java.lang.Float.floatToRawIntBits(float) void IntrinsicLocationsBuilderMIPS64::VisitFloatFloatToRawIntBits(HInvoke* invoke) { - CreateFPToIntLocations(arena_, invoke); + CreateFPToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitFloatFloatToRawIntBits(HInvoke* invoke) { MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler()); } -static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresFpuRegister()); } @@ -199,7 +197,7 @@ static void MoveIntToFP(LocationSummary* locations, bool is64bit, Mips64Assemble // double java.lang.Double.longBitsToDouble(long) void IntrinsicLocationsBuilderMIPS64::VisitDoubleLongBitsToDouble(HInvoke* invoke) { - CreateIntToFPLocations(arena_, invoke); + CreateIntToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitDoubleLongBitsToDouble(HInvoke* invoke) { @@ -208,17 +206,16 @@ void IntrinsicCodeGeneratorMIPS64::VisitDoubleLongBitsToDouble(HInvoke* invoke) // float 
java.lang.Float.intBitsToFloat(int) void IntrinsicLocationsBuilderMIPS64::VisitFloatIntBitsToFloat(HInvoke* invoke) { - CreateIntToFPLocations(arena_, invoke); + CreateIntToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitFloatIntBitsToFloat(HInvoke* invoke) { MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler()); } -static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); } @@ -250,7 +247,7 @@ static void GenReverseBytes(LocationSummary* locations, // int java.lang.Integer.reverseBytes(int) void IntrinsicLocationsBuilderMIPS64::VisitIntegerReverseBytes(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitIntegerReverseBytes(HInvoke* invoke) { @@ -259,7 +256,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitIntegerReverseBytes(HInvoke* invoke) { // long java.lang.Long.reverseBytes(long) void IntrinsicLocationsBuilderMIPS64::VisitLongReverseBytes(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitLongReverseBytes(HInvoke* invoke) { @@ -268,7 +265,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitLongReverseBytes(HInvoke* invoke) { // short java.lang.Short.reverseBytes(short) void IntrinsicLocationsBuilderMIPS64::VisitShortReverseBytes(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void 
IntrinsicCodeGeneratorMIPS64::VisitShortReverseBytes(HInvoke* invoke) { @@ -290,7 +287,7 @@ static void GenNumberOfLeadingZeroes(LocationSummary* locations, // int java.lang.Integer.numberOfLeadingZeros(int i) void IntrinsicLocationsBuilderMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { @@ -299,7 +296,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* inv // int java.lang.Long.numberOfLeadingZeros(long i) void IntrinsicLocationsBuilderMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { @@ -327,7 +324,7 @@ static void GenNumberOfTrailingZeroes(LocationSummary* locations, // int java.lang.Integer.numberOfTrailingZeros(int i) void IntrinsicLocationsBuilderMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) { @@ -336,7 +333,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* in // int java.lang.Long.numberOfTrailingZeros(long i) void IntrinsicLocationsBuilderMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) { @@ -364,7 +361,7 @@ static void GenReverse(LocationSummary* locations, // int java.lang.Integer.reverse(int) void IntrinsicLocationsBuilderMIPS64::VisitIntegerReverse(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); 
} void IntrinsicCodeGeneratorMIPS64::VisitIntegerReverse(HInvoke* invoke) { @@ -373,17 +370,16 @@ void IntrinsicCodeGeneratorMIPS64::VisitIntegerReverse(HInvoke* invoke) { // long java.lang.Long.reverse(long) void IntrinsicLocationsBuilderMIPS64::VisitLongReverse(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitLongReverse(HInvoke* invoke) { GenReverse(invoke->GetLocations(), DataType::Type::kInt64, GetAssembler()); } -static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); } @@ -458,7 +454,7 @@ static void GenBitCount(LocationSummary* locations, // int java.lang.Integer.bitCount(int) void IntrinsicLocationsBuilderMIPS64::VisitIntegerBitCount(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitIntegerBitCount(HInvoke* invoke) { @@ -467,7 +463,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitIntegerBitCount(HInvoke* invoke) { // int java.lang.Long.bitCount(long) void IntrinsicLocationsBuilderMIPS64::VisitLongBitCount(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitLongBitCount(HInvoke* invoke) { @@ -487,7 +483,7 @@ static void MathAbsFP(LocationSummary* locations, bool is64bit, Mips64Assembler* // double java.lang.Math.abs(double) void IntrinsicLocationsBuilderMIPS64::VisitMathAbsDouble(HInvoke* invoke) { - 
CreateFPToFPLocations(arena_, invoke); + CreateFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathAbsDouble(HInvoke* invoke) { @@ -496,17 +492,16 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathAbsDouble(HInvoke* invoke) { // float java.lang.Math.abs(float) void IntrinsicLocationsBuilderMIPS64::VisitMathAbsFloat(HInvoke* invoke) { - CreateFPToFPLocations(arena_, invoke); + CreateFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathAbsFloat(HInvoke* invoke) { MathAbsFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler()); } -static void CreateIntToInt(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntToInt(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); } @@ -528,7 +523,7 @@ static void GenAbsInteger(LocationSummary* locations, bool is64bit, Mips64Assemb // int java.lang.Math.abs(int) void IntrinsicLocationsBuilderMIPS64::VisitMathAbsInt(HInvoke* invoke) { - CreateIntToInt(arena_, invoke); + CreateIntToInt(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathAbsInt(HInvoke* invoke) { @@ -537,7 +532,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathAbsInt(HInvoke* invoke) { // long java.lang.Math.abs(long) void IntrinsicLocationsBuilderMIPS64::VisitMathAbsLong(HInvoke* invoke) { - CreateIntToInt(arena_, invoke); + CreateIntToInt(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathAbsLong(HInvoke* invoke) { @@ -613,10 +608,9 @@ static void GenMinMaxFP(LocationSummary* locations, __ Bind(&done); } -static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new 
(arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); @@ -624,7 +618,7 @@ static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { // double java.lang.Math.min(double, double) void IntrinsicLocationsBuilderMIPS64::VisitMathMinDoubleDouble(HInvoke* invoke) { - CreateFPFPToFPLocations(arena_, invoke); + CreateFPFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathMinDoubleDouble(HInvoke* invoke) { @@ -633,7 +627,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathMinDoubleDouble(HInvoke* invoke) { // float java.lang.Math.min(float, float) void IntrinsicLocationsBuilderMIPS64::VisitMathMinFloatFloat(HInvoke* invoke) { - CreateFPFPToFPLocations(arena_, invoke); + CreateFPFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathMinFloatFloat(HInvoke* invoke) { @@ -642,7 +636,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathMinFloatFloat(HInvoke* invoke) { // double java.lang.Math.max(double, double) void IntrinsicLocationsBuilderMIPS64::VisitMathMaxDoubleDouble(HInvoke* invoke) { - CreateFPFPToFPLocations(arena_, invoke); + CreateFPFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathMaxDoubleDouble(HInvoke* invoke) { @@ -651,7 +645,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathMaxDoubleDouble(HInvoke* invoke) { // float java.lang.Math.max(float, float) void IntrinsicLocationsBuilderMIPS64::VisitMathMaxFloatFloat(HInvoke* invoke) { - CreateFPFPToFPLocations(arena_, invoke); + CreateFPFPToFPLocations(allocator_, invoke); } void 
IntrinsicCodeGeneratorMIPS64::VisitMathMaxFloatFloat(HInvoke* invoke) { @@ -716,10 +710,9 @@ static void GenMinMax(LocationSummary* locations, } } -static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); @@ -727,7 +720,7 @@ static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { // int java.lang.Math.min(int, int) void IntrinsicLocationsBuilderMIPS64::VisitMathMinIntInt(HInvoke* invoke) { - CreateIntIntToIntLocations(arena_, invoke); + CreateIntIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathMinIntInt(HInvoke* invoke) { @@ -736,7 +729,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathMinIntInt(HInvoke* invoke) { // long java.lang.Math.min(long, long) void IntrinsicLocationsBuilderMIPS64::VisitMathMinLongLong(HInvoke* invoke) { - CreateIntIntToIntLocations(arena_, invoke); + CreateIntIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathMinLongLong(HInvoke* invoke) { @@ -745,7 +738,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathMinLongLong(HInvoke* invoke) { // int java.lang.Math.max(int, int) void IntrinsicLocationsBuilderMIPS64::VisitMathMaxIntInt(HInvoke* invoke) { - CreateIntIntToIntLocations(arena_, invoke); + CreateIntIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathMaxIntInt(HInvoke* invoke) { @@ -754,7 +747,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathMaxIntInt(HInvoke* invoke) { // long java.lang.Math.max(long, 
long) void IntrinsicLocationsBuilderMIPS64::VisitMathMaxLongLong(HInvoke* invoke) { - CreateIntIntToIntLocations(arena_, invoke); + CreateIntIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathMaxLongLong(HInvoke* invoke) { @@ -763,7 +756,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathMaxLongLong(HInvoke* invoke) { // double java.lang.Math.sqrt(double) void IntrinsicLocationsBuilderMIPS64::VisitMathSqrt(HInvoke* invoke) { - CreateFPToFPLocations(arena_, invoke); + CreateFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathSqrt(HInvoke* invoke) { @@ -775,19 +768,18 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathSqrt(HInvoke* invoke) { __ SqrtD(out, in); } -static void CreateFPToFP(ArenaAllocator* arena, +static void CreateFPToFP(ArenaAllocator* allocator, HInvoke* invoke, Location::OutputOverlap overlaps = Location::kOutputOverlap) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister(), overlaps); } // double java.lang.Math.rint(double) void IntrinsicLocationsBuilderMIPS64::VisitMathRint(HInvoke* invoke) { - CreateFPToFP(arena_, invoke, Location::kNoOutputOverlap); + CreateFPToFP(allocator_, invoke, Location::kNoOutputOverlap); } void IntrinsicCodeGeneratorMIPS64::VisitMathRint(HInvoke* invoke) { @@ -801,7 +793,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathRint(HInvoke* invoke) { // double java.lang.Math.floor(double) void IntrinsicLocationsBuilderMIPS64::VisitMathFloor(HInvoke* invoke) { - CreateFPToFP(arena_, invoke); + CreateFPToFP(allocator_, invoke); } const constexpr uint16_t kFPLeaveUnchanged = kPositiveZero | @@ -878,7 +870,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathFloor(HInvoke* invoke) { // double 
java.lang.Math.ceil(double) void IntrinsicLocationsBuilderMIPS64::VisitMathCeil(HInvoke* invoke) { - CreateFPToFP(arena_, invoke); + CreateFPToFP(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathCeil(HInvoke* invoke) { @@ -961,9 +953,8 @@ static void GenRound(LocationSummary* locations, Mips64Assembler* assembler, Dat // int java.lang.Math.round(float) void IntrinsicLocationsBuilderMIPS64::VisitMathRoundFloat(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->AddTemp(Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); @@ -975,9 +966,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathRoundFloat(HInvoke* invoke) { // long java.lang.Math.round(double) void IntrinsicLocationsBuilderMIPS64::VisitMathRoundDouble(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->AddTemp(Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); @@ -989,7 +979,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathRoundDouble(HInvoke* invoke) { // byte libcore.io.Memory.peekByte(long address) void IntrinsicLocationsBuilderMIPS64::VisitMemoryPeekByte(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekByte(HInvoke* invoke) { @@ -1002,7 +992,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekByte(HInvoke* invoke) { // short libcore.io.Memory.peekShort(long address) void 
IntrinsicLocationsBuilderMIPS64::VisitMemoryPeekShortNative(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekShortNative(HInvoke* invoke) { @@ -1015,7 +1005,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekShortNative(HInvoke* invoke) { // int libcore.io.Memory.peekInt(long address) void IntrinsicLocationsBuilderMIPS64::VisitMemoryPeekIntNative(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekIntNative(HInvoke* invoke) { @@ -1028,7 +1018,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekIntNative(HInvoke* invoke) { // long libcore.io.Memory.peekLong(long address) void IntrinsicLocationsBuilderMIPS64::VisitMemoryPeekLongNative(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekLongNative(HInvoke* invoke) { @@ -1039,17 +1029,16 @@ void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekLongNative(HInvoke* invoke) { __ Ld(out, adr, 0); } -static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); } // void libcore.io.Memory.pokeByte(long address, byte value) void IntrinsicLocationsBuilderMIPS64::VisitMemoryPokeByte(HInvoke* invoke) { - CreateIntIntToVoidLocations(arena_, invoke); + CreateIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeByte(HInvoke* 
invoke) { @@ -1062,7 +1051,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeByte(HInvoke* invoke) { // void libcore.io.Memory.pokeShort(long address, short value) void IntrinsicLocationsBuilderMIPS64::VisitMemoryPokeShortNative(HInvoke* invoke) { - CreateIntIntToVoidLocations(arena_, invoke); + CreateIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeShortNative(HInvoke* invoke) { @@ -1075,7 +1064,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeShortNative(HInvoke* invoke) { // void libcore.io.Memory.pokeInt(long address, int value) void IntrinsicLocationsBuilderMIPS64::VisitMemoryPokeIntNative(HInvoke* invoke) { - CreateIntIntToVoidLocations(arena_, invoke); + CreateIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeIntNative(HInvoke* invoke) { @@ -1088,7 +1077,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeIntNative(HInvoke* invoke) { // void libcore.io.Memory.pokeLong(long address, long value) void IntrinsicLocationsBuilderMIPS64::VisitMemoryPokeLongNative(HInvoke* invoke) { - CreateIntIntToVoidLocations(arena_, invoke); + CreateIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeLongNative(HInvoke* invoke) { @@ -1101,9 +1090,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeLongNative(HInvoke* invoke) { // Thread java.lang.Thread.currentThread() void IntrinsicLocationsBuilderMIPS64::VisitThreadCurrentThread(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetOut(Location::RequiresRegister()); } @@ -1117,17 +1105,18 @@ void IntrinsicCodeGeneratorMIPS64::VisitThreadCurrentThread(HInvoke* invoke) { Thread::PeerOffset<kMips64PointerSize>().Int32Value()); } -static void 
CreateIntIntIntToIntLocations(ArenaAllocator* arena, +static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke, DataType::Type type) { bool can_call = kEmitCompilerReadBarrier && (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject || invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile); - LocationSummary* locations = new (arena) LocationSummary(invoke, - (can_call - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall), - kIntrinsified); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, + can_call + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall, + kIntrinsified); if (can_call && kUseBakerReadBarrier) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } @@ -1227,7 +1216,7 @@ static void GenUnsafeGet(HInvoke* invoke, // int sun.misc.Unsafe.getInt(Object o, long offset) void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGet(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32); + CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32); } void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGet(HInvoke* invoke) { @@ -1236,7 +1225,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGet(HInvoke* invoke) { // int sun.misc.Unsafe.getIntVolatile(Object o, long offset) void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32); + CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32); } void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) { @@ -1245,7 +1234,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) { // long sun.misc.Unsafe.getLong(Object o, long offset) void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLong(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64); + 
CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64); } void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLong(HInvoke* invoke) { @@ -1254,7 +1243,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLong(HInvoke* invoke) { // long sun.misc.Unsafe.getLongVolatile(Object o, long offset) void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64); + CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64); } void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) { @@ -1263,7 +1252,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) { // Object sun.misc.Unsafe.getObject(Object o, long offset) void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObject(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference); + CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference); } void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObject(HInvoke* invoke) { @@ -1272,17 +1261,16 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObject(HInvoke* invoke) { // Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset) void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference); + CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference); } void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_); } -static void CreateIntIntIntIntToVoid(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new 
(allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::NoLocation()); // Unused receiver. locations->SetInAt(1, Location::RequiresRegister()); locations->SetInAt(2, Location::RequiresRegister()); @@ -1341,7 +1329,7 @@ static void GenUnsafePut(LocationSummary* locations, // void sun.misc.Unsafe.putInt(Object o, long offset, int x) void IntrinsicLocationsBuilderMIPS64::VisitUnsafePut(HInvoke* invoke) { - CreateIntIntIntIntToVoid(arena_, invoke); + CreateIntIntIntIntToVoid(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitUnsafePut(HInvoke* invoke) { @@ -1354,7 +1342,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafePut(HInvoke* invoke) { // void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x) void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) { - CreateIntIntIntIntToVoid(arena_, invoke); + CreateIntIntIntIntToVoid(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) { @@ -1367,7 +1355,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) { // void sun.misc.Unsafe.putIntVolatile(Object o, long offset, int x) void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) { - CreateIntIntIntIntToVoid(arena_, invoke); + CreateIntIntIntIntToVoid(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) { @@ -1380,7 +1368,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) { // void sun.misc.Unsafe.putObject(Object o, long offset, Object x) void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObject(HInvoke* invoke) { - CreateIntIntIntIntToVoid(arena_, invoke); + CreateIntIntIntIntToVoid(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObject(HInvoke* invoke) { @@ -1393,7 +1381,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObject(HInvoke* invoke) { // void 
sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x) void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) { - CreateIntIntIntIntToVoid(arena_, invoke); + CreateIntIntIntIntToVoid(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) { @@ -1406,7 +1394,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) // void sun.misc.Unsafe.putObjectVolatile(Object o, long offset, Object x) void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) { - CreateIntIntIntIntToVoid(arena_, invoke); + CreateIntIntIntIntToVoid(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) { @@ -1419,7 +1407,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) // void sun.misc.Unsafe.putLong(Object o, long offset, long x) void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLong(HInvoke* invoke) { - CreateIntIntIntIntToVoid(arena_, invoke); + CreateIntIntIntIntToVoid(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLong(HInvoke* invoke) { @@ -1432,7 +1420,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLong(HInvoke* invoke) { // void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x) void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) { - CreateIntIntIntIntToVoid(arena_, invoke); + CreateIntIntIntIntToVoid(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) { @@ -1445,7 +1433,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) { // void sun.misc.Unsafe.putLongVolatile(Object o, long offset, long x) void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) { - CreateIntIntIntIntToVoid(arena_, invoke); + CreateIntIntIntIntToVoid(allocator_, invoke); } void 
IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) { @@ -1456,15 +1444,16 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) { codegen_); } -static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* arena, HInvoke* invoke) { +static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* allocator, HInvoke* invoke) { bool can_call = kEmitCompilerReadBarrier && kUseBakerReadBarrier && (invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject); - LocationSummary* locations = new (arena) LocationSummary(invoke, - (can_call - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall), - kIntrinsified); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, + can_call + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall, + kIntrinsified); locations->SetInAt(0, Location::NoLocation()); // Unused receiver. locations->SetInAt(1, Location::RequiresRegister()); locations->SetInAt(2, Location::RequiresRegister()); @@ -1583,7 +1572,7 @@ static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorMIPS64* co // boolean sun.misc.Unsafe.compareAndSwapInt(Object o, long offset, int expected, int x) void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASInt(HInvoke* invoke) { - CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke); + CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASInt(HInvoke* invoke) { @@ -1592,7 +1581,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASInt(HInvoke* invoke) { // boolean sun.misc.Unsafe.compareAndSwapLong(Object o, long offset, long expected, long x) void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASLong(HInvoke* invoke) { - CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke); + CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASLong(HInvoke* invoke) { @@ -1607,7 +1596,7 @@ void 
IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASObject(HInvoke* invoke) { return; } - CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke); + CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASObject(HInvoke* invoke) { @@ -1620,9 +1609,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASObject(HInvoke* invoke) { // int java.lang.String.compareTo(String anotherString) void IntrinsicLocationsBuilderMIPS64::VisitStringCompareTo(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainAndSlowPath, - kIntrinsified); + LocationSummary* locations = new (allocator_) LocationSummary( + invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); @@ -1648,9 +1636,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringCompareTo(HInvoke* invoke) { // boolean java.lang.String.equals(Object anObject) void IntrinsicLocationsBuilderMIPS64::VisitStringEquals(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister()); @@ -1814,9 +1801,8 @@ static void GenerateStringIndexOf(HInvoke* invoke, // int java.lang.String.indexOf(int ch) void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOf(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainAndSlowPath, - kIntrinsified); + LocationSummary* locations = new (allocator_) LocationSummary( + invoke, 
LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); // We have a hand-crafted assembly stub that follows the runtime // calling convention. So it's best to align the inputs accordingly. InvokeRuntimeCallingConvention calling_convention; @@ -1835,9 +1821,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOf(HInvoke* invoke) { // int java.lang.String.indexOf(int ch, int fromIndex) void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainAndSlowPath, - kIntrinsified); + LocationSummary* locations = new (allocator_) LocationSummary( + invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); // We have a hand-crafted assembly stub that follows the runtime // calling convention. So it's best to align the inputs accordingly. InvokeRuntimeCallingConvention calling_convention; @@ -1855,9 +1840,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) { // java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount) void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainAndSlowPath, - kIntrinsified); + LocationSummary* locations = new (allocator_) LocationSummary( + invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); @@ -1883,9 +1867,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke // java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data) void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromChars(HInvoke* invoke) { - LocationSummary* locations = 
new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainOnly, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); @@ -1907,9 +1890,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromChars(HInvoke* invoke // java.lang.StringFactory.newStringFromString(String toCopy) void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromString(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainAndSlowPath, - kIntrinsified); + LocationSummary* locations = new (allocator_) LocationSummary( + invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32); @@ -1948,7 +1930,7 @@ static void GenIsInfinite(LocationSummary* locations, // boolean java.lang.Float.isInfinite(float) void IntrinsicLocationsBuilderMIPS64::VisitFloatIsInfinite(HInvoke* invoke) { - CreateFPToIntLocations(arena_, invoke); + CreateFPToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitFloatIsInfinite(HInvoke* invoke) { @@ -1957,7 +1939,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitFloatIsInfinite(HInvoke* invoke) { // boolean java.lang.Double.isInfinite(double) void IntrinsicLocationsBuilderMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) { - CreateFPToIntLocations(arena_, invoke); + CreateFPToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) { @@ -1966,9 +1948,8 @@ void 
IntrinsicCodeGeneratorMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) { // void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin) void IntrinsicLocationsBuilderMIPS64::VisitStringGetCharsNoCheck(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); locations->SetInAt(2, Location::RequiresRegister()); @@ -2083,7 +2064,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitSystemArrayCopyChar(HInvoke* invoke) // Okay, it is safe to generate inline code. LocationSummary* locations = - new (arena_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified); + new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified); // arraycopy(Object src, int srcPos, Object dest, int destPos, int length). 
locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1))); @@ -2277,7 +2258,7 @@ static void GenHighestOneBit(LocationSummary* locations, // int java.lang.Integer.highestOneBit(int) void IntrinsicLocationsBuilderMIPS64::VisitIntegerHighestOneBit(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitIntegerHighestOneBit(HInvoke* invoke) { @@ -2286,7 +2267,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitIntegerHighestOneBit(HInvoke* invoke) { // long java.lang.Long.highestOneBit(long) void IntrinsicLocationsBuilderMIPS64::VisitLongHighestOneBit(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitLongHighestOneBit(HInvoke* invoke) { @@ -2311,7 +2292,7 @@ static void GenLowestOneBit(LocationSummary* locations, // int java.lang.Integer.lowestOneBit(int) void IntrinsicLocationsBuilderMIPS64::VisitIntegerLowestOneBit(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitIntegerLowestOneBit(HInvoke* invoke) { @@ -2320,27 +2301,25 @@ void IntrinsicCodeGeneratorMIPS64::VisitIntegerLowestOneBit(HInvoke* invoke) { // long java.lang.Long.lowestOneBit(long) void IntrinsicLocationsBuilderMIPS64::VisitLongLowestOneBit(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitLongLowestOneBit(HInvoke* invoke) { GenLowestOneBit(invoke->GetLocations(), DataType::Type::kInt64, GetAssembler()); } -static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kCallOnMainOnly, - kIntrinsified); +static void 
CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0))); locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kFloat64)); } -static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kCallOnMainOnly, - kIntrinsified); +static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0))); @@ -2376,7 +2355,7 @@ static void GenFPFPToFPCall(HInvoke* invoke, // static double java.lang.Math.cos(double a) void IntrinsicLocationsBuilderMIPS64::VisitMathCos(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathCos(HInvoke* invoke) { @@ -2385,7 +2364,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathCos(HInvoke* invoke) { // static double java.lang.Math.sin(double a) void IntrinsicLocationsBuilderMIPS64::VisitMathSin(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathSin(HInvoke* invoke) { @@ -2394,7 +2373,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathSin(HInvoke* invoke) { // static double java.lang.Math.acos(double a) void IntrinsicLocationsBuilderMIPS64::VisitMathAcos(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); 
} void IntrinsicCodeGeneratorMIPS64::VisitMathAcos(HInvoke* invoke) { @@ -2403,7 +2382,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathAcos(HInvoke* invoke) { // static double java.lang.Math.asin(double a) void IntrinsicLocationsBuilderMIPS64::VisitMathAsin(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathAsin(HInvoke* invoke) { @@ -2412,7 +2391,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathAsin(HInvoke* invoke) { // static double java.lang.Math.atan(double a) void IntrinsicLocationsBuilderMIPS64::VisitMathAtan(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathAtan(HInvoke* invoke) { @@ -2421,7 +2400,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathAtan(HInvoke* invoke) { // static double java.lang.Math.atan2(double y, double x) void IntrinsicLocationsBuilderMIPS64::VisitMathAtan2(HInvoke* invoke) { - CreateFPFPToFPCallLocations(arena_, invoke); + CreateFPFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathAtan2(HInvoke* invoke) { @@ -2430,7 +2409,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathAtan2(HInvoke* invoke) { // static double java.lang.Math.cbrt(double a) void IntrinsicLocationsBuilderMIPS64::VisitMathCbrt(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathCbrt(HInvoke* invoke) { @@ -2439,7 +2418,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathCbrt(HInvoke* invoke) { // static double java.lang.Math.cosh(double x) void IntrinsicLocationsBuilderMIPS64::VisitMathCosh(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathCosh(HInvoke* invoke) { @@ -2448,7 +2427,7 @@ void 
IntrinsicCodeGeneratorMIPS64::VisitMathCosh(HInvoke* invoke) { // static double java.lang.Math.exp(double a) void IntrinsicLocationsBuilderMIPS64::VisitMathExp(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathExp(HInvoke* invoke) { @@ -2457,7 +2436,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathExp(HInvoke* invoke) { // static double java.lang.Math.expm1(double x) void IntrinsicLocationsBuilderMIPS64::VisitMathExpm1(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathExpm1(HInvoke* invoke) { @@ -2466,7 +2445,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathExpm1(HInvoke* invoke) { // static double java.lang.Math.hypot(double x, double y) void IntrinsicLocationsBuilderMIPS64::VisitMathHypot(HInvoke* invoke) { - CreateFPFPToFPCallLocations(arena_, invoke); + CreateFPFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathHypot(HInvoke* invoke) { @@ -2475,7 +2454,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathHypot(HInvoke* invoke) { // static double java.lang.Math.log(double a) void IntrinsicLocationsBuilderMIPS64::VisitMathLog(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathLog(HInvoke* invoke) { @@ -2484,7 +2463,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathLog(HInvoke* invoke) { // static double java.lang.Math.log10(double x) void IntrinsicLocationsBuilderMIPS64::VisitMathLog10(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathLog10(HInvoke* invoke) { @@ -2493,7 +2472,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathLog10(HInvoke* invoke) { // static double java.lang.Math.nextAfter(double start, 
double direction) void IntrinsicLocationsBuilderMIPS64::VisitMathNextAfter(HInvoke* invoke) { - CreateFPFPToFPCallLocations(arena_, invoke); + CreateFPFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathNextAfter(HInvoke* invoke) { @@ -2502,7 +2481,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathNextAfter(HInvoke* invoke) { // static double java.lang.Math.sinh(double x) void IntrinsicLocationsBuilderMIPS64::VisitMathSinh(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathSinh(HInvoke* invoke) { @@ -2511,7 +2490,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathSinh(HInvoke* invoke) { // static double java.lang.Math.tan(double a) void IntrinsicLocationsBuilderMIPS64::VisitMathTan(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathTan(HInvoke* invoke) { @@ -2520,7 +2499,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathTan(HInvoke* invoke) { // static double java.lang.Math.tanh(double x) void IntrinsicLocationsBuilderMIPS64::VisitMathTanh(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorMIPS64::VisitMathTanh(HInvoke* invoke) { diff --git a/compiler/optimizing/intrinsics_mips64.h b/compiler/optimizing/intrinsics_mips64.h index 6880a255c3..6085c7b29c 100644 --- a/compiler/optimizing/intrinsics_mips64.h +++ b/compiler/optimizing/intrinsics_mips64.h @@ -50,7 +50,7 @@ class IntrinsicLocationsBuilderMIPS64 FINAL : public IntrinsicVisitor { private: CodeGeneratorMIPS64* codegen_; - ArenaAllocator* arena_; + ArenaAllocator* allocator_; DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS64); }; diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc index a5916228a8..8b389ba876 100644 --- 
a/compiler/optimizing/intrinsics_x86.cc +++ b/compiler/optimizing/intrinsics_x86.cc @@ -46,7 +46,7 @@ static constexpr int64_t kDoubleNaN = INT64_C(0x7FF8000000000000); static constexpr int32_t kFloatNaN = INT32_C(0x7FC00000); IntrinsicLocationsBuilderX86::IntrinsicLocationsBuilderX86(CodeGeneratorX86* codegen) - : arena_(codegen->GetGraph()->GetArena()), + : allocator_(codegen->GetGraph()->GetAllocator()), codegen_(codegen) { } @@ -56,7 +56,7 @@ X86Assembler* IntrinsicCodeGeneratorX86::GetAssembler() { } ArenaAllocator* IntrinsicCodeGeneratorX86::GetAllocator() { - return codegen_->GetGraph()->GetArena(); + return codegen_->GetGraph()->GetAllocator(); } bool IntrinsicLocationsBuilderX86::TryDispatch(HInvoke* invoke) { @@ -175,10 +175,9 @@ class ReadBarrierSystemArrayCopySlowPathX86 : public SlowPathCode { #define __ assembler-> -static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke, bool is64bit) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is64bit) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); if (is64bit) { @@ -186,10 +185,9 @@ static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke, bool } } -static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke, bool is64bit) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is64bit) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); 
locations->SetOut(Location::RequiresFpuRegister()); if (is64bit) { @@ -230,10 +228,10 @@ static void MoveIntToFP(LocationSummary* locations, bool is64bit, X86Assembler* } void IntrinsicLocationsBuilderX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) { - CreateFPToIntLocations(arena_, invoke, /* is64bit */ true); + CreateFPToIntLocations(allocator_, invoke, /* is64bit */ true); } void IntrinsicLocationsBuilderX86::VisitDoubleLongBitsToDouble(HInvoke* invoke) { - CreateIntToFPLocations(arena_, invoke, /* is64bit */ true); + CreateIntToFPLocations(allocator_, invoke, /* is64bit */ true); } void IntrinsicCodeGeneratorX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) { @@ -244,10 +242,10 @@ void IntrinsicCodeGeneratorX86::VisitDoubleLongBitsToDouble(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) { - CreateFPToIntLocations(arena_, invoke, /* is64bit */ false); + CreateFPToIntLocations(allocator_, invoke, /* is64bit */ false); } void IntrinsicLocationsBuilderX86::VisitFloatIntBitsToFloat(HInvoke* invoke) { - CreateIntToFPLocations(arena_, invoke, /* is64bit */ false); + CreateIntToFPLocations(allocator_, invoke, /* is64bit */ false); } void IntrinsicCodeGeneratorX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) { @@ -257,26 +255,23 @@ void IntrinsicCodeGeneratorX86::VisitFloatIntBitsToFloat(HInvoke* invoke) { MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler()); } -static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::SameAsFirstInput()); } -static void 
CreateLongToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateLongToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister()); } -static void CreateLongToLongLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateLongToLongLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); } @@ -302,7 +297,7 @@ static void GenReverseBytes(LocationSummary* locations, } void IntrinsicLocationsBuilderX86::VisitIntegerReverseBytes(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitIntegerReverseBytes(HInvoke* invoke) { @@ -310,7 +305,7 @@ void IntrinsicCodeGeneratorX86::VisitIntegerReverseBytes(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitLongReverseBytes(HInvoke* invoke) { - CreateLongToLongLocations(arena_, invoke); + CreateLongToLongLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitLongReverseBytes(HInvoke* invoke) { @@ -331,7 +326,7 @@ void IntrinsicCodeGeneratorX86::VisitLongReverseBytes(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitShortReverseBytes(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void 
IntrinsicCodeGeneratorX86::VisitShortReverseBytes(HInvoke* invoke) { @@ -342,11 +337,10 @@ void IntrinsicCodeGeneratorX86::VisitShortReverseBytes(HInvoke* invoke) { // TODO: Consider Quick's way of doing Double abs through integer operations, as the immediate we // need is 64b. -static void CreateFloatToFloat(ArenaAllocator* arena, HInvoke* invoke) { +static void CreateFloatToFloat(ArenaAllocator* allocator, HInvoke* invoke) { // TODO: Enable memory operations when the assembler supports them. - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::SameAsFirstInput()); HInvokeStaticOrDirect* static_or_direct = invoke->AsInvokeStaticOrDirect(); @@ -401,7 +395,7 @@ static void MathAbsFP(HInvoke* invoke, } void IntrinsicLocationsBuilderX86::VisitMathAbsDouble(HInvoke* invoke) { - CreateFloatToFloat(arena_, invoke); + CreateFloatToFloat(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathAbsDouble(HInvoke* invoke) { @@ -409,17 +403,16 @@ void IntrinsicCodeGeneratorX86::VisitMathAbsDouble(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMathAbsFloat(HInvoke* invoke) { - CreateFloatToFloat(arena_, invoke); + CreateFloatToFloat(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathAbsFloat(HInvoke* invoke) { MathAbsFP(invoke, /* is64bit */ false, GetAssembler(), codegen_); } -static void CreateAbsIntLocation(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateAbsIntLocation(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, 
Location::RegisterLocation(EAX)); locations->SetOut(Location::SameAsFirstInput()); locations->AddTemp(Location::RegisterLocation(EDX)); @@ -444,10 +437,9 @@ static void GenAbsInteger(LocationSummary* locations, X86Assembler* assembler) { // The result is in EAX. } -static void CreateAbsLongLocation(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateAbsLongLocation(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); locations->AddTemp(Location::RequiresRegister()); @@ -480,7 +472,7 @@ static void GenAbsLong(LocationSummary* locations, X86Assembler* assembler) { } void IntrinsicLocationsBuilderX86::VisitMathAbsInt(HInvoke* invoke) { - CreateAbsIntLocation(arena_, invoke); + CreateAbsIntLocation(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathAbsInt(HInvoke* invoke) { @@ -488,7 +480,7 @@ void IntrinsicCodeGeneratorX86::VisitMathAbsInt(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMathAbsLong(HInvoke* invoke) { - CreateAbsLongLocation(arena_, invoke); + CreateAbsLongLocation(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathAbsLong(HInvoke* invoke) { @@ -598,10 +590,9 @@ static void GenMinMaxFP(HInvoke* invoke, __ Bind(&done); } -static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); 
locations->SetInAt(1, Location::RequiresFpuRegister()); // The following is sub-optimal, but all we can do for now. It would be fine to also accept @@ -616,7 +607,7 @@ static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMathMinDoubleDouble(HInvoke* invoke) { - CreateFPFPToFPLocations(arena_, invoke); + CreateFPFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathMinDoubleDouble(HInvoke* invoke) { @@ -628,7 +619,7 @@ void IntrinsicCodeGeneratorX86::VisitMathMinDoubleDouble(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMathMinFloatFloat(HInvoke* invoke) { - CreateFPFPToFPLocations(arena_, invoke); + CreateFPFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathMinFloatFloat(HInvoke* invoke) { @@ -640,7 +631,7 @@ void IntrinsicCodeGeneratorX86::VisitMathMinFloatFloat(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMathMaxDoubleDouble(HInvoke* invoke) { - CreateFPFPToFPLocations(arena_, invoke); + CreateFPFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathMaxDoubleDouble(HInvoke* invoke) { @@ -652,7 +643,7 @@ void IntrinsicCodeGeneratorX86::VisitMathMaxDoubleDouble(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMathMaxFloatFloat(HInvoke* invoke) { - CreateFPFPToFPLocations(arena_, invoke); + CreateFPFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathMaxFloatFloat(HInvoke* invoke) { @@ -718,19 +709,17 @@ static void GenMinMax(LocationSummary* locations, bool is_min, bool is_long, } } -static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, 
LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); locations->SetOut(Location::SameAsFirstInput()); } -static void CreateLongLongToLongLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateLongLongToLongLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); locations->SetOut(Location::SameAsFirstInput()); @@ -739,7 +728,7 @@ static void CreateLongLongToLongLocations(ArenaAllocator* arena, HInvoke* invoke } void IntrinsicLocationsBuilderX86::VisitMathMinIntInt(HInvoke* invoke) { - CreateIntIntToIntLocations(arena_, invoke); + CreateIntIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathMinIntInt(HInvoke* invoke) { @@ -747,7 +736,7 @@ void IntrinsicCodeGeneratorX86::VisitMathMinIntInt(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMathMinLongLong(HInvoke* invoke) { - CreateLongLongToLongLocations(arena_, invoke); + CreateLongLongToLongLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathMinLongLong(HInvoke* invoke) { @@ -755,7 +744,7 @@ void IntrinsicCodeGeneratorX86::VisitMathMinLongLong(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMathMaxIntInt(HInvoke* invoke) { - CreateIntIntToIntLocations(arena_, invoke); + CreateIntIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathMaxIntInt(HInvoke* invoke) { @@ -763,23 +752,22 @@ void IntrinsicCodeGeneratorX86::VisitMathMaxIntInt(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMathMaxLongLong(HInvoke* invoke) { - CreateLongLongToLongLocations(arena_, invoke); + 
CreateLongLongToLongLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathMaxLongLong(HInvoke* invoke) { GenMinMax(invoke->GetLocations(), /* is_min */ false, /* is_long */ true, GetAssembler()); } -static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister()); } void IntrinsicLocationsBuilderX86::VisitMathSqrt(HInvoke* invoke) { - CreateFPToFPLocations(arena_, invoke); + CreateFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathSqrt(HInvoke* invoke) { @@ -805,18 +793,18 @@ static void InvokeOutOfLineIntrinsic(CodeGeneratorX86* codegen, HInvoke* invoke) } } -static void CreateSSE41FPToFPLocations(ArenaAllocator* arena, - HInvoke* invoke, - CodeGeneratorX86* codegen) { +static void CreateSSE41FPToFPLocations(ArenaAllocator* allocator, + HInvoke* invoke, + CodeGeneratorX86* codegen) { // Do we have instruction support? if (codegen->GetInstructionSetFeatures().HasSSE4_1()) { - CreateFPToFPLocations(arena, invoke); + CreateFPToFPLocations(allocator, invoke); return; } // We have to fall back to a call to the intrinsic. 
- LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kCallOnMainOnly); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetFpuRegisterAt(0))); locations->SetOut(Location::FpuRegisterLocation(XMM0)); @@ -839,7 +827,7 @@ static void GenSSE41FPToFPIntrinsic(CodeGeneratorX86* codegen, } void IntrinsicLocationsBuilderX86::VisitMathCeil(HInvoke* invoke) { - CreateSSE41FPToFPLocations(arena_, invoke, codegen_); + CreateSSE41FPToFPLocations(allocator_, invoke, codegen_); } void IntrinsicCodeGeneratorX86::VisitMathCeil(HInvoke* invoke) { @@ -847,7 +835,7 @@ void IntrinsicCodeGeneratorX86::VisitMathCeil(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMathFloor(HInvoke* invoke) { - CreateSSE41FPToFPLocations(arena_, invoke, codegen_); + CreateSSE41FPToFPLocations(allocator_, invoke, codegen_); } void IntrinsicCodeGeneratorX86::VisitMathFloor(HInvoke* invoke) { @@ -855,7 +843,7 @@ void IntrinsicCodeGeneratorX86::VisitMathFloor(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMathRint(HInvoke* invoke) { - CreateSSE41FPToFPLocations(arena_, invoke, codegen_); + CreateSSE41FPToFPLocations(allocator_, invoke, codegen_); } void IntrinsicCodeGeneratorX86::VisitMathRint(HInvoke* invoke) { @@ -867,9 +855,8 @@ void IntrinsicLocationsBuilderX86::VisitMathRoundFloat(HInvoke* invoke) { if (codegen_->GetInstructionSetFeatures().HasSSE4_1()) { HInvokeStaticOrDirect* static_or_direct = invoke->AsInvokeStaticOrDirect(); DCHECK(static_or_direct != nullptr); - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); if 
(static_or_direct->HasSpecialInput() && invoke->InputAt( @@ -883,8 +870,8 @@ void IntrinsicLocationsBuilderX86::VisitMathRoundFloat(HInvoke* invoke) { } // We have to fall back to a call to the intrinsic. - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainOnly); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetFpuRegisterAt(0))); locations->SetOut(Location::RegisterLocation(EAX)); @@ -951,11 +938,9 @@ void IntrinsicCodeGeneratorX86::VisitMathRoundFloat(HInvoke* invoke) { __ Bind(&done); } -static void CreateFPToFPCallLocations(ArenaAllocator* arena, - HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kCallOnMainOnly, - kIntrinsified); +static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0))); locations->SetOut(Location::FpuRegisterLocation(XMM0)); @@ -992,7 +977,7 @@ static void GenFPToFPCall(HInvoke* invoke, CodeGeneratorX86* codegen, QuickEntry } void IntrinsicLocationsBuilderX86::VisitMathCos(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathCos(HInvoke* invoke) { @@ -1000,7 +985,7 @@ void IntrinsicCodeGeneratorX86::VisitMathCos(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMathSin(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathSin(HInvoke* invoke) { @@ -1008,7 +993,7 
@@ void IntrinsicCodeGeneratorX86::VisitMathSin(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMathAcos(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathAcos(HInvoke* invoke) { @@ -1016,7 +1001,7 @@ void IntrinsicCodeGeneratorX86::VisitMathAcos(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMathAsin(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathAsin(HInvoke* invoke) { @@ -1024,7 +1009,7 @@ void IntrinsicCodeGeneratorX86::VisitMathAsin(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMathAtan(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathAtan(HInvoke* invoke) { @@ -1032,7 +1017,7 @@ void IntrinsicCodeGeneratorX86::VisitMathAtan(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMathCbrt(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathCbrt(HInvoke* invoke) { @@ -1040,7 +1025,7 @@ void IntrinsicCodeGeneratorX86::VisitMathCbrt(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMathCosh(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathCosh(HInvoke* invoke) { @@ -1048,7 +1033,7 @@ void IntrinsicCodeGeneratorX86::VisitMathCosh(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMathExp(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathExp(HInvoke* invoke) { @@ -1056,7 +1041,7 @@ void IntrinsicCodeGeneratorX86::VisitMathExp(HInvoke* invoke) { } void 
IntrinsicLocationsBuilderX86::VisitMathExpm1(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathExpm1(HInvoke* invoke) { @@ -1064,7 +1049,7 @@ void IntrinsicCodeGeneratorX86::VisitMathExpm1(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMathLog(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathLog(HInvoke* invoke) { @@ -1072,7 +1057,7 @@ void IntrinsicCodeGeneratorX86::VisitMathLog(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMathLog10(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathLog10(HInvoke* invoke) { @@ -1080,7 +1065,7 @@ void IntrinsicCodeGeneratorX86::VisitMathLog10(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMathSinh(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathSinh(HInvoke* invoke) { @@ -1088,7 +1073,7 @@ void IntrinsicCodeGeneratorX86::VisitMathSinh(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMathTan(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathTan(HInvoke* invoke) { @@ -1096,18 +1081,16 @@ void IntrinsicCodeGeneratorX86::VisitMathTan(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMathTanh(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathTanh(HInvoke* invoke) { GenFPToFPCall(invoke, codegen_, kQuickTanh); } -static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, - HInvoke* invoke) { - LocationSummary* locations = new (arena) 
LocationSummary(invoke, - LocationSummary::kCallOnMainOnly, - kIntrinsified); +static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0))); locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1))); @@ -1115,7 +1098,7 @@ static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, } void IntrinsicLocationsBuilderX86::VisitMathAtan2(HInvoke* invoke) { - CreateFPFPToFPCallLocations(arena_, invoke); + CreateFPFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathAtan2(HInvoke* invoke) { @@ -1123,7 +1106,7 @@ void IntrinsicCodeGeneratorX86::VisitMathAtan2(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMathHypot(HInvoke* invoke) { - CreateFPFPToFPCallLocations(arena_, invoke); + CreateFPFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathHypot(HInvoke* invoke) { @@ -1131,7 +1114,7 @@ void IntrinsicCodeGeneratorX86::VisitMathHypot(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMathNextAfter(HInvoke* invoke) { - CreateFPFPToFPCallLocations(arena_, invoke); + CreateFPFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMathNextAfter(HInvoke* invoke) { @@ -1174,7 +1157,7 @@ void IntrinsicLocationsBuilderX86::VisitSystemArrayCopyChar(HInvoke* invoke) { // Okay, it is safe to generate inline code. LocationSummary* locations = - new (arena_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified); + new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified); // arraycopy(Object src, int srcPos, Object dest, int destPos, int length). 
locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1))); @@ -1336,9 +1319,8 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopyChar(HInvoke* invoke) { void IntrinsicLocationsBuilderX86::VisitStringCompareTo(HInvoke* invoke) { // The inputs plus one temp. - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainAndSlowPath, - kIntrinsified); + LocationSummary* locations = new (allocator_) LocationSummary( + invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); @@ -1363,9 +1345,8 @@ void IntrinsicCodeGeneratorX86::VisitStringCompareTo(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitStringEquals(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); @@ -1655,7 +1636,7 @@ static void GenerateStringIndexOf(HInvoke* invoke, } void IntrinsicLocationsBuilderX86::VisitStringIndexOf(HInvoke* invoke) { - CreateStringIndexOfLocations(invoke, arena_, /* start_at_zero */ true); + CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ true); } void IntrinsicCodeGeneratorX86::VisitStringIndexOf(HInvoke* invoke) { @@ -1663,7 +1644,7 @@ void IntrinsicCodeGeneratorX86::VisitStringIndexOf(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitStringIndexOfAfter(HInvoke* invoke) { - CreateStringIndexOfLocations(invoke, arena_, /* start_at_zero */ false); + CreateStringIndexOfLocations(invoke, allocator_, /* 
start_at_zero */ false); } void IntrinsicCodeGeneratorX86::VisitStringIndexOfAfter(HInvoke* invoke) { @@ -1672,9 +1653,8 @@ void IntrinsicCodeGeneratorX86::VisitStringIndexOfAfter(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitStringNewStringFromBytes(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainAndSlowPath, - kIntrinsified); + LocationSummary* locations = new (allocator_) LocationSummary( + invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); @@ -1699,9 +1679,8 @@ void IntrinsicCodeGeneratorX86::VisitStringNewStringFromBytes(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitStringNewStringFromChars(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainOnly, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); @@ -1721,9 +1700,8 @@ void IntrinsicCodeGeneratorX86::VisitStringNewStringFromChars(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitStringNewStringFromString(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainAndSlowPath, - kIntrinsified); + LocationSummary* locations = new (allocator_) LocationSummary( + invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, 
Location::RegisterLocation(calling_convention.GetRegisterAt(0))); locations->SetOut(Location::RegisterLocation(EAX)); @@ -1746,9 +1724,8 @@ void IntrinsicCodeGeneratorX86::VisitStringNewStringFromString(HInvoke* invoke) void IntrinsicLocationsBuilderX86::VisitStringGetCharsNoCheck(HInvoke* invoke) { // public void getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin); - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1))); // Place srcEnd in ECX to save a move below. @@ -1875,7 +1852,7 @@ static void GenPeek(LocationSummary* locations, DataType::Type size, X86Assemble } void IntrinsicLocationsBuilderX86::VisitMemoryPeekByte(HInvoke* invoke) { - CreateLongToIntLocations(arena_, invoke); + CreateLongToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMemoryPeekByte(HInvoke* invoke) { @@ -1883,7 +1860,7 @@ void IntrinsicCodeGeneratorX86::VisitMemoryPeekByte(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMemoryPeekIntNative(HInvoke* invoke) { - CreateLongToIntLocations(arena_, invoke); + CreateLongToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMemoryPeekIntNative(HInvoke* invoke) { @@ -1891,7 +1868,7 @@ void IntrinsicCodeGeneratorX86::VisitMemoryPeekIntNative(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMemoryPeekLongNative(HInvoke* invoke) { - CreateLongToLongLocations(arena_, invoke); + CreateLongToLongLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMemoryPeekLongNative(HInvoke* invoke) { @@ -1899,18 +1876,18 @@ void IntrinsicCodeGeneratorX86::VisitMemoryPeekLongNative(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMemoryPeekShortNative(HInvoke* 
invoke) { - CreateLongToIntLocations(arena_, invoke); + CreateLongToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86::VisitMemoryPeekShortNative(HInvoke* invoke) { GenPeek(invoke->GetLocations(), DataType::Type::kInt16, GetAssembler()); } -static void CreateLongIntToVoidLocations(ArenaAllocator* arena, DataType::Type size, +static void CreateLongIntToVoidLocations(ArenaAllocator* allocator, + DataType::Type size, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); HInstruction* value = invoke->InputAt(1); if (size == DataType::Type::kInt8) { @@ -1967,7 +1944,7 @@ static void GenPoke(LocationSummary* locations, DataType::Type size, X86Assemble } void IntrinsicLocationsBuilderX86::VisitMemoryPokeByte(HInvoke* invoke) { - CreateLongIntToVoidLocations(arena_, DataType::Type::kInt8, invoke); + CreateLongIntToVoidLocations(allocator_, DataType::Type::kInt8, invoke); } void IntrinsicCodeGeneratorX86::VisitMemoryPokeByte(HInvoke* invoke) { @@ -1975,7 +1952,7 @@ void IntrinsicCodeGeneratorX86::VisitMemoryPokeByte(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMemoryPokeIntNative(HInvoke* invoke) { - CreateLongIntToVoidLocations(arena_, DataType::Type::kInt32, invoke); + CreateLongIntToVoidLocations(allocator_, DataType::Type::kInt32, invoke); } void IntrinsicCodeGeneratorX86::VisitMemoryPokeIntNative(HInvoke* invoke) { @@ -1983,7 +1960,7 @@ void IntrinsicCodeGeneratorX86::VisitMemoryPokeIntNative(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMemoryPokeLongNative(HInvoke* invoke) { - CreateLongIntToVoidLocations(arena_, DataType::Type::kInt64, invoke); + CreateLongIntToVoidLocations(allocator_, DataType::Type::kInt64, invoke); } void 
IntrinsicCodeGeneratorX86::VisitMemoryPokeLongNative(HInvoke* invoke) { @@ -1991,7 +1968,7 @@ void IntrinsicCodeGeneratorX86::VisitMemoryPokeLongNative(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitMemoryPokeShortNative(HInvoke* invoke) { - CreateLongIntToVoidLocations(arena_, DataType::Type::kInt16, invoke); + CreateLongIntToVoidLocations(allocator_, DataType::Type::kInt16, invoke); } void IntrinsicCodeGeneratorX86::VisitMemoryPokeShortNative(HInvoke* invoke) { @@ -1999,9 +1976,8 @@ void IntrinsicCodeGeneratorX86::VisitMemoryPokeShortNative(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitThreadCurrentThread(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetOut(Location::RequiresRegister()); } @@ -2071,18 +2047,19 @@ static void GenUnsafeGet(HInvoke* invoke, } } -static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, +static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke, DataType::Type type, bool is_volatile) { bool can_call = kEmitCompilerReadBarrier && (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject || invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile); - LocationSummary* locations = new (arena) LocationSummary(invoke, - (can_call - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall), - kIntrinsified); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, + can_call + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall, + kIntrinsified); if (can_call && kUseBakerReadBarrier) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} @@ -2104,23 +2081,26 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, } void IntrinsicLocationsBuilderX86::VisitUnsafeGet(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32, /* is_volatile */ false); + CreateIntIntIntToIntLocations( + allocator_, invoke, DataType::Type::kInt32, /* is_volatile */ false); } void IntrinsicLocationsBuilderX86::VisitUnsafeGetVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32, /* is_volatile */ true); + CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32, /* is_volatile */ true); } void IntrinsicLocationsBuilderX86::VisitUnsafeGetLong(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64, /* is_volatile */ false); + CreateIntIntIntToIntLocations( + allocator_, invoke, DataType::Type::kInt64, /* is_volatile */ false); } void IntrinsicLocationsBuilderX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64, /* is_volatile */ true); + CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64, /* is_volatile */ true); } void IntrinsicLocationsBuilderX86::VisitUnsafeGetObject(HInvoke* invoke) { CreateIntIntIntToIntLocations( - arena_, invoke, DataType::Type::kReference, /* is_volatile */ false); + allocator_, invoke, DataType::Type::kReference, /* is_volatile */ false); } void IntrinsicLocationsBuilderX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference, /* is_volatile */ true); + CreateIntIntIntToIntLocations( + allocator_, invoke, DataType::Type::kReference, /* is_volatile */ true); } @@ -2144,13 +2124,12 @@ void IntrinsicCodeGeneratorX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { } -static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* arena, +static void 
CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* allocator, DataType::Type type, HInvoke* invoke, bool is_volatile) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::NoLocation()); // Unused receiver. locations->SetInAt(1, Location::RequiresRegister()); locations->SetInAt(2, Location::RequiresRegister()); @@ -2168,39 +2147,39 @@ static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* arena, void IntrinsicLocationsBuilderX86::VisitUnsafePut(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - arena_, DataType::Type::kInt32, invoke, /* is_volatile */ false); + allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ false); } void IntrinsicLocationsBuilderX86::VisitUnsafePutOrdered(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - arena_, DataType::Type::kInt32, invoke, /* is_volatile */ false); + allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ false); } void IntrinsicLocationsBuilderX86::VisitUnsafePutVolatile(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - arena_, DataType::Type::kInt32, invoke, /* is_volatile */ true); + allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ true); } void IntrinsicLocationsBuilderX86::VisitUnsafePutObject(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - arena_, DataType::Type::kReference, invoke, /* is_volatile */ false); + allocator_, DataType::Type::kReference, invoke, /* is_volatile */ false); } void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectOrdered(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - arena_, DataType::Type::kReference, invoke, /* is_volatile */ false); + allocator_, DataType::Type::kReference, invoke, /* is_volatile */ false); } void 
IntrinsicLocationsBuilderX86::VisitUnsafePutObjectVolatile(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - arena_, DataType::Type::kReference, invoke, /* is_volatile */ true); + allocator_, DataType::Type::kReference, invoke, /* is_volatile */ true); } void IntrinsicLocationsBuilderX86::VisitUnsafePutLong(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - arena_, DataType::Type::kInt64, invoke, /* is_volatile */ false); + allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ false); } void IntrinsicLocationsBuilderX86::VisitUnsafePutLongOrdered(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - arena_, DataType::Type::kInt64, invoke, /* is_volatile */ false); + allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ false); } void IntrinsicLocationsBuilderX86::VisitUnsafePutLongVolatile(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - arena_, DataType::Type::kInt64, invoke, /* is_volatile */ true); + allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ true); } // We don't care for ordered: it requires an AnyStore barrier, which is already given by the x86 @@ -2282,17 +2261,18 @@ void IntrinsicCodeGeneratorX86::VisitUnsafePutLongVolatile(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ true, codegen_); } -static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, +static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator, DataType::Type type, HInvoke* invoke) { bool can_call = kEmitCompilerReadBarrier && kUseBakerReadBarrier && (invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject); - LocationSummary* locations = new (arena) LocationSummary(invoke, - (can_call - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall), - kIntrinsified); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, + can_call + ? 
LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall, + kIntrinsified); locations->SetInAt(0, Location::NoLocation()); // Unused receiver. locations->SetInAt(1, Location::RequiresRegister()); // Offset is a long, but in 32 bit mode, we only need the low word. @@ -2320,11 +2300,11 @@ static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, } void IntrinsicLocationsBuilderX86::VisitUnsafeCASInt(HInvoke* invoke) { - CreateIntIntIntIntIntToInt(arena_, DataType::Type::kInt32, invoke); + CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kInt32, invoke); } void IntrinsicLocationsBuilderX86::VisitUnsafeCASLong(HInvoke* invoke) { - CreateIntIntIntIntIntToInt(arena_, DataType::Type::kInt64, invoke); + CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kInt64, invoke); } void IntrinsicLocationsBuilderX86::VisitUnsafeCASObject(HInvoke* invoke) { @@ -2334,7 +2314,7 @@ void IntrinsicLocationsBuilderX86::VisitUnsafeCASObject(HInvoke* invoke) { return; } - CreateIntIntIntIntIntToInt(arena_, DataType::Type::kReference, invoke); + CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kReference, invoke); } static void GenCAS(DataType::Type type, HInvoke* invoke, CodeGeneratorX86* codegen) { @@ -2473,9 +2453,8 @@ void IntrinsicCodeGeneratorX86::VisitUnsafeCASObject(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitIntegerReverse(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::SameAsFirstInput()); locations->AddTemp(Location::RequiresRegister()); @@ -2516,9 +2495,8 @@ void IntrinsicCodeGeneratorX86::VisitIntegerReverse(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitLongReverse(HInvoke* invoke) { - LocationSummary* locations = new (arena_) 
LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::SameAsFirstInput()); locations->AddTemp(Location::RequiresRegister()); @@ -2553,15 +2531,14 @@ void IntrinsicCodeGeneratorX86::VisitLongReverse(HInvoke* invoke) { } static void CreateBitCountLocations( - ArenaAllocator* arena, CodeGeneratorX86* codegen, HInvoke* invoke, bool is_long) { + ArenaAllocator* allocator, CodeGeneratorX86* codegen, HInvoke* invoke, bool is_long) { if (!codegen->GetInstructionSetFeatures().HasPopCnt()) { // Do nothing if there is no popcnt support. This results in generating // a call for the intrinsic rather than direct code. return; } - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); if (is_long) { locations->AddTemp(Location::RequiresRegister()); } @@ -2610,7 +2587,7 @@ static void GenBitCount(X86Assembler* assembler, } void IntrinsicLocationsBuilderX86::VisitIntegerBitCount(HInvoke* invoke) { - CreateBitCountLocations(arena_, codegen_, invoke, /* is_long */ false); + CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long */ false); } void IntrinsicCodeGeneratorX86::VisitIntegerBitCount(HInvoke* invoke) { @@ -2618,17 +2595,16 @@ void IntrinsicCodeGeneratorX86::VisitIntegerBitCount(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitLongBitCount(HInvoke* invoke) { - CreateBitCountLocations(arena_, codegen_, invoke, /* is_long */ true); + CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long */ true); } void IntrinsicCodeGeneratorX86::VisitLongBitCount(HInvoke* invoke) { GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ true); } -static void 
CreateLeadingZeroLocations(ArenaAllocator* arena, HInvoke* invoke, bool is_long) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateLeadingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_long) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); if (is_long) { locations->SetInAt(0, Location::RequiresRegister()); } else { @@ -2715,7 +2691,7 @@ static void GenLeadingZeros(X86Assembler* assembler, } void IntrinsicLocationsBuilderX86::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { - CreateLeadingZeroLocations(arena_, invoke, /* is_long */ false); + CreateLeadingZeroLocations(allocator_, invoke, /* is_long */ false); } void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { @@ -2723,17 +2699,16 @@ void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke } void IntrinsicLocationsBuilderX86::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { - CreateLeadingZeroLocations(arena_, invoke, /* is_long */ true); + CreateLeadingZeroLocations(allocator_, invoke, /* is_long */ true); } void IntrinsicCodeGeneratorX86::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true); } -static void CreateTrailingZeroLocations(ArenaAllocator* arena, HInvoke* invoke, bool is_long) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateTrailingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_long) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); if (is_long) { locations->SetInAt(0, Location::RequiresRegister()); } else { @@ -2807,7 +2782,7 @@ static void GenTrailingZeros(X86Assembler* assembler, } void 
IntrinsicLocationsBuilderX86::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) { - CreateTrailingZeroLocations(arena_, invoke, /* is_long */ false); + CreateTrailingZeroLocations(allocator_, invoke, /* is_long */ false); } void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) { @@ -2815,7 +2790,7 @@ void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfTrailingZeros(HInvoke* invok } void IntrinsicLocationsBuilderX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke) { - CreateTrailingZeroLocations(arena_, invoke, /* is_long */ true); + CreateTrailingZeroLocations(allocator_, invoke, /* is_long */ true); } void IntrinsicCodeGeneratorX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke) { @@ -3352,9 +3327,8 @@ void IntrinsicCodeGeneratorX86::VisitIntegerValueOf(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86::VisitThreadInterrupted(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetOut(Location::RequiresRegister()); } diff --git a/compiler/optimizing/intrinsics_x86.h b/compiler/optimizing/intrinsics_x86.h index 22f11b1d34..ba3ca0a410 100644 --- a/compiler/optimizing/intrinsics_x86.h +++ b/compiler/optimizing/intrinsics_x86.h @@ -49,7 +49,7 @@ class IntrinsicLocationsBuilderX86 FINAL : public IntrinsicVisitor { bool TryDispatch(HInvoke* invoke); private: - ArenaAllocator* arena_; + ArenaAllocator* allocator_; CodeGeneratorX86* codegen_; DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86); diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc index a2545ee3d8..6337900b71 100644 --- a/compiler/optimizing/intrinsics_x86_64.cc +++ b/compiler/optimizing/intrinsics_x86_64.cc @@ -41,7 +41,7 @@ namespace art { namespace x86_64 { 
IntrinsicLocationsBuilderX86_64::IntrinsicLocationsBuilderX86_64(CodeGeneratorX86_64* codegen) - : arena_(codegen->GetGraph()->GetArena()), codegen_(codegen) { + : allocator_(codegen->GetGraph()->GetAllocator()), codegen_(codegen) { } X86_64Assembler* IntrinsicCodeGeneratorX86_64::GetAssembler() { @@ -49,7 +49,7 @@ X86_64Assembler* IntrinsicCodeGeneratorX86_64::GetAssembler() { } ArenaAllocator* IntrinsicCodeGeneratorX86_64::GetAllocator() { - return codegen_->GetGraph()->GetArena(); + return codegen_->GetGraph()->GetAllocator(); } bool IntrinsicLocationsBuilderX86_64::TryDispatch(HInvoke* invoke) { @@ -128,18 +128,16 @@ class ReadBarrierSystemArrayCopySlowPathX86_64 : public SlowPathCode { #define __ assembler-> -static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); } -static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresFpuRegister()); } @@ -157,10 +155,10 @@ static void MoveIntToFP(LocationSummary* locations, bool is64bit, X86_64Assemble } void IntrinsicLocationsBuilderX86_64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) { - CreateFPToIntLocations(arena_, invoke); + CreateFPToIntLocations(allocator_, invoke); } 
void IntrinsicLocationsBuilderX86_64::VisitDoubleLongBitsToDouble(HInvoke* invoke) { - CreateIntToFPLocations(arena_, invoke); + CreateIntToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) { @@ -171,10 +169,10 @@ void IntrinsicCodeGeneratorX86_64::VisitDoubleLongBitsToDouble(HInvoke* invoke) } void IntrinsicLocationsBuilderX86_64::VisitFloatFloatToRawIntBits(HInvoke* invoke) { - CreateFPToIntLocations(arena_, invoke); + CreateFPToIntLocations(allocator_, invoke); } void IntrinsicLocationsBuilderX86_64::VisitFloatIntBitsToFloat(HInvoke* invoke) { - CreateIntToFPLocations(arena_, invoke); + CreateIntToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitFloatFloatToRawIntBits(HInvoke* invoke) { @@ -184,10 +182,9 @@ void IntrinsicCodeGeneratorX86_64::VisitFloatIntBitsToFloat(HInvoke* invoke) { MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler()); } -static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::SameAsFirstInput()); } @@ -216,7 +213,7 @@ static void GenReverseBytes(LocationSummary* locations, } void IntrinsicLocationsBuilderX86_64::VisitIntegerReverseBytes(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitIntegerReverseBytes(HInvoke* invoke) { @@ -224,7 +221,7 @@ void IntrinsicCodeGeneratorX86_64::VisitIntegerReverseBytes(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitLongReverseBytes(HInvoke* invoke) { - 
CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitLongReverseBytes(HInvoke* invoke) { @@ -232,7 +229,7 @@ void IntrinsicCodeGeneratorX86_64::VisitLongReverseBytes(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitShortReverseBytes(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitShortReverseBytes(HInvoke* invoke) { @@ -243,11 +240,10 @@ void IntrinsicCodeGeneratorX86_64::VisitShortReverseBytes(HInvoke* invoke) { // TODO: Consider Quick's way of doing Double abs through integer operations, as the immediate we // need is 64b. -static void CreateFloatToFloatPlusTemps(ArenaAllocator* arena, HInvoke* invoke) { +static void CreateFloatToFloatPlusTemps(ArenaAllocator* allocator, HInvoke* invoke) { // TODO: Enable memory operations when the assembler supports them. - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::SameAsFirstInput()); locations->AddTemp(Location::RequiresFpuRegister()); // FP reg to hold mask. 
@@ -275,7 +271,7 @@ static void MathAbsFP(LocationSummary* locations, } void IntrinsicLocationsBuilderX86_64::VisitMathAbsDouble(HInvoke* invoke) { - CreateFloatToFloatPlusTemps(arena_, invoke); + CreateFloatToFloatPlusTemps(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathAbsDouble(HInvoke* invoke) { @@ -283,17 +279,16 @@ void IntrinsicCodeGeneratorX86_64::VisitMathAbsDouble(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathAbsFloat(HInvoke* invoke) { - CreateFloatToFloatPlusTemps(arena_, invoke); + CreateFloatToFloatPlusTemps(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathAbsFloat(HInvoke* invoke) { MathAbsFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler(), codegen_); } -static void CreateIntToIntPlusTemp(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntToIntPlusTemp(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::SameAsFirstInput()); locations->AddTemp(Location::RequiresRegister()); @@ -322,7 +317,7 @@ static void GenAbsInteger(LocationSummary* locations, bool is64bit, X86_64Assemb } void IntrinsicLocationsBuilderX86_64::VisitMathAbsInt(HInvoke* invoke) { - CreateIntToIntPlusTemp(arena_, invoke); + CreateIntToIntPlusTemp(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathAbsInt(HInvoke* invoke) { @@ -330,7 +325,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathAbsInt(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathAbsLong(HInvoke* invoke) { - CreateIntToIntPlusTemp(arena_, invoke); + CreateIntToIntPlusTemp(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathAbsLong(HInvoke* invoke) { @@ -421,10 +416,9 @@ static void 
GenMinMaxFP(LocationSummary* locations, __ Bind(&done); } -static void CreateFPFPToFP(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateFPFPToFP(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::RequiresFpuRegister()); // The following is sub-optimal, but all we can do for now. It would be fine to also accept @@ -433,7 +427,7 @@ static void CreateFPFPToFP(ArenaAllocator* arena, HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathMinDoubleDouble(HInvoke* invoke) { - CreateFPFPToFP(arena_, invoke); + CreateFPFPToFP(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathMinDoubleDouble(HInvoke* invoke) { @@ -442,7 +436,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathMinDoubleDouble(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathMinFloatFloat(HInvoke* invoke) { - CreateFPFPToFP(arena_, invoke); + CreateFPFPToFP(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathMinFloatFloat(HInvoke* invoke) { @@ -451,7 +445,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathMinFloatFloat(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathMaxDoubleDouble(HInvoke* invoke) { - CreateFPFPToFP(arena_, invoke); + CreateFPFPToFP(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathMaxDoubleDouble(HInvoke* invoke) { @@ -460,7 +454,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathMaxDoubleDouble(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathMaxFloatFloat(HInvoke* invoke) { - CreateFPFPToFP(arena_, invoke); + CreateFPFPToFP(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathMaxFloatFloat(HInvoke* invoke) { @@ -500,17 +494,16 @@ static void 
GenMinMax(LocationSummary* locations, bool is_min, bool is_long, __ cmov(is_min ? Condition::kGreater : Condition::kLess, out, op2, is_long); } -static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); locations->SetOut(Location::SameAsFirstInput()); } void IntrinsicLocationsBuilderX86_64::VisitMathMinIntInt(HInvoke* invoke) { - CreateIntIntToIntLocations(arena_, invoke); + CreateIntIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathMinIntInt(HInvoke* invoke) { @@ -518,7 +511,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathMinIntInt(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathMinLongLong(HInvoke* invoke) { - CreateIntIntToIntLocations(arena_, invoke); + CreateIntIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathMinLongLong(HInvoke* invoke) { @@ -526,7 +519,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathMinLongLong(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathMaxIntInt(HInvoke* invoke) { - CreateIntIntToIntLocations(arena_, invoke); + CreateIntIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathMaxIntInt(HInvoke* invoke) { @@ -534,23 +527,22 @@ void IntrinsicCodeGeneratorX86_64::VisitMathMaxIntInt(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathMaxLongLong(HInvoke* invoke) { - CreateIntIntToIntLocations(arena_, invoke); + CreateIntIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathMaxLongLong(HInvoke* invoke) { 
GenMinMax(invoke->GetLocations(), /* is_min */ false, /* is_long */ true, GetAssembler()); } -static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresFpuRegister()); } void IntrinsicLocationsBuilderX86_64::VisitMathSqrt(HInvoke* invoke) { - CreateFPToFPLocations(arena_, invoke); + CreateFPToFPLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathSqrt(HInvoke* invoke) { @@ -576,18 +568,18 @@ static void InvokeOutOfLineIntrinsic(CodeGeneratorX86_64* codegen, HInvoke* invo } } -static void CreateSSE41FPToFPLocations(ArenaAllocator* arena, - HInvoke* invoke, - CodeGeneratorX86_64* codegen) { +static void CreateSSE41FPToFPLocations(ArenaAllocator* allocator, + HInvoke* invoke, + CodeGeneratorX86_64* codegen) { // Do we have instruction support? if (codegen->GetInstructionSetFeatures().HasSSE4_1()) { - CreateFPToFPLocations(arena, invoke); + CreateFPToFPLocations(allocator, invoke); return; } // We have to fall back to a call to the intrinsic. 
- LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kCallOnMainOnly); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetFpuRegisterAt(0))); locations->SetOut(Location::FpuRegisterLocation(XMM0)); @@ -610,7 +602,7 @@ static void GenSSE41FPToFPIntrinsic(CodeGeneratorX86_64* codegen, } void IntrinsicLocationsBuilderX86_64::VisitMathCeil(HInvoke* invoke) { - CreateSSE41FPToFPLocations(arena_, invoke, codegen_); + CreateSSE41FPToFPLocations(allocator_, invoke, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitMathCeil(HInvoke* invoke) { @@ -618,7 +610,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathCeil(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathFloor(HInvoke* invoke) { - CreateSSE41FPToFPLocations(arena_, invoke, codegen_); + CreateSSE41FPToFPLocations(allocator_, invoke, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitMathFloor(HInvoke* invoke) { @@ -626,21 +618,20 @@ void IntrinsicCodeGeneratorX86_64::VisitMathFloor(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathRint(HInvoke* invoke) { - CreateSSE41FPToFPLocations(arena_, invoke, codegen_); + CreateSSE41FPToFPLocations(allocator_, invoke, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitMathRint(HInvoke* invoke) { GenSSE41FPToFPIntrinsic(codegen_, invoke, GetAssembler(), 0); } -static void CreateSSE41FPToIntLocations(ArenaAllocator* arena, - HInvoke* invoke, - CodeGeneratorX86_64* codegen) { +static void CreateSSE41FPToIntLocations(ArenaAllocator* allocator, + HInvoke* invoke, + CodeGeneratorX86_64* codegen) { // Do we have instruction support? 
if (codegen->GetInstructionSetFeatures().HasSSE4_1()) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); locations->AddTemp(Location::RequiresFpuRegister()); @@ -649,8 +640,8 @@ static void CreateSSE41FPToIntLocations(ArenaAllocator* arena, } // We have to fall back to a call to the intrinsic. - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kCallOnMainOnly); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetFpuRegisterAt(0))); locations->SetOut(Location::RegisterLocation(RAX)); @@ -659,7 +650,7 @@ static void CreateSSE41FPToIntLocations(ArenaAllocator* arena, } void IntrinsicLocationsBuilderX86_64::VisitMathRoundFloat(HInvoke* invoke) { - CreateSSE41FPToIntLocations(arena_, invoke, codegen_); + CreateSSE41FPToIntLocations(allocator_, invoke, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitMathRoundFloat(HInvoke* invoke) { @@ -703,7 +694,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundFloat(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathRoundDouble(HInvoke* invoke) { - CreateSSE41FPToIntLocations(arena_, invoke, codegen_); + CreateSSE41FPToIntLocations(allocator_, invoke, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitMathRoundDouble(HInvoke* invoke) { @@ -746,11 +737,9 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundDouble(HInvoke* invoke) { __ Bind(&done); } -static void CreateFPToFPCallLocations(ArenaAllocator* arena, - HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - 
LocationSummary::kCallOnMainOnly, - kIntrinsified); +static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0))); locations->SetOut(Location::FpuRegisterLocation(XMM0)); @@ -773,7 +762,7 @@ static void GenFPToFPCall(HInvoke* invoke, CodeGeneratorX86_64* codegen, } void IntrinsicLocationsBuilderX86_64::VisitMathCos(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathCos(HInvoke* invoke) { @@ -781,7 +770,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathCos(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathSin(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathSin(HInvoke* invoke) { @@ -789,7 +778,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathSin(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathAcos(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathAcos(HInvoke* invoke) { @@ -797,7 +786,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathAcos(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathAsin(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathAsin(HInvoke* invoke) { @@ -805,7 +794,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathAsin(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathAtan(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + 
CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathAtan(HInvoke* invoke) { @@ -813,7 +802,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathAtan(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathCbrt(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathCbrt(HInvoke* invoke) { @@ -821,7 +810,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathCbrt(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathCosh(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathCosh(HInvoke* invoke) { @@ -829,7 +818,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathCosh(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathExp(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathExp(HInvoke* invoke) { @@ -837,7 +826,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathExp(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathExpm1(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathExpm1(HInvoke* invoke) { @@ -845,7 +834,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathExpm1(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathLog(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathLog(HInvoke* invoke) { @@ -853,7 +842,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathLog(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathLog10(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } 
void IntrinsicCodeGeneratorX86_64::VisitMathLog10(HInvoke* invoke) { @@ -861,7 +850,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathLog10(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathSinh(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathSinh(HInvoke* invoke) { @@ -869,7 +858,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathSinh(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathTan(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathTan(HInvoke* invoke) { @@ -877,18 +866,16 @@ void IntrinsicCodeGeneratorX86_64::VisitMathTan(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathTanh(HInvoke* invoke) { - CreateFPToFPCallLocations(arena_, invoke); + CreateFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathTanh(HInvoke* invoke) { GenFPToFPCall(invoke, codegen_, kQuickTanh); } -static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, - HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kCallOnMainOnly, - kIntrinsified); +static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0))); locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1))); @@ -903,7 +890,7 @@ static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, } void IntrinsicLocationsBuilderX86_64::VisitMathAtan2(HInvoke* invoke) { - CreateFPFPToFPCallLocations(arena_, invoke); + CreateFPFPToFPCallLocations(allocator_, 
invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathAtan2(HInvoke* invoke) { @@ -911,7 +898,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathAtan2(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathHypot(HInvoke* invoke) { - CreateFPFPToFPCallLocations(arena_, invoke); + CreateFPFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathHypot(HInvoke* invoke) { @@ -919,7 +906,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathHypot(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMathNextAfter(HInvoke* invoke) { - CreateFPFPToFPCallLocations(arena_, invoke); + CreateFPFPToFPCallLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMathNextAfter(HInvoke* invoke) { @@ -949,9 +936,8 @@ void IntrinsicLocationsBuilderX86_64::VisitSystemArrayCopyChar(HInvoke* invoke) } } - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnSlowPath, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified); // arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length). 
locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1))); @@ -1507,9 +1493,8 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitStringCompareTo(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainAndSlowPath, - kIntrinsified); + LocationSummary* locations = new (allocator_) LocationSummary( + invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); @@ -1534,9 +1519,8 @@ void IntrinsicCodeGeneratorX86_64::VisitStringCompareTo(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitStringEquals(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); @@ -1812,7 +1796,7 @@ static void GenerateStringIndexOf(HInvoke* invoke, } void IntrinsicLocationsBuilderX86_64::VisitStringIndexOf(HInvoke* invoke) { - CreateStringIndexOfLocations(invoke, arena_, /* start_at_zero */ true); + CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ true); } void IntrinsicCodeGeneratorX86_64::VisitStringIndexOf(HInvoke* invoke) { @@ -1820,7 +1804,7 @@ void IntrinsicCodeGeneratorX86_64::VisitStringIndexOf(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitStringIndexOfAfter(HInvoke* invoke) { - CreateStringIndexOfLocations(invoke, arena_, /* start_at_zero */ false); + CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero 
*/ false); } void IntrinsicCodeGeneratorX86_64::VisitStringIndexOfAfter(HInvoke* invoke) { @@ -1829,9 +1813,8 @@ void IntrinsicCodeGeneratorX86_64::VisitStringIndexOfAfter(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromBytes(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainAndSlowPath, - kIntrinsified); + LocationSummary* locations = new (allocator_) LocationSummary( + invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); @@ -1856,9 +1839,8 @@ void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromBytes(HInvoke* invoke } void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromChars(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainOnly, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); @@ -1878,9 +1860,8 @@ void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromChars(HInvoke* invoke } void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromString(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCallOnMainAndSlowPath, - kIntrinsified); + LocationSummary* locations = new (allocator_) LocationSummary( + invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, 
Location::RegisterLocation(calling_convention.GetRegisterAt(0))); locations->SetOut(Location::RegisterLocation(RAX)); @@ -1903,9 +1884,8 @@ void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromString(HInvoke* invok void IntrinsicLocationsBuilderX86_64::VisitStringGetCharsNoCheck(HInvoke* invoke) { // public void getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin); - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1))); locations->SetInAt(2, Location::RequiresRegister()); @@ -2018,7 +1998,7 @@ static void GenPeek(LocationSummary* locations, DataType::Type size, X86_64Assem } void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekByte(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekByte(HInvoke* invoke) { @@ -2026,7 +2006,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekByte(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekIntNative(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekIntNative(HInvoke* invoke) { @@ -2034,7 +2014,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekIntNative(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekLongNative(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekLongNative(HInvoke* invoke) { @@ -2042,17 +2022,16 @@ void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekLongNative(HInvoke* invoke) { } void 
IntrinsicLocationsBuilderX86_64::VisitMemoryPeekShortNative(HInvoke* invoke) { - CreateIntToIntLocations(arena_, invoke); + CreateIntToIntLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekShortNative(HInvoke* invoke) { GenPeek(invoke->GetLocations(), DataType::Type::kInt16, GetAssembler()); } -static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrInt32Constant(invoke->InputAt(1))); } @@ -2104,7 +2083,7 @@ static void GenPoke(LocationSummary* locations, DataType::Type size, X86_64Assem } void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeByte(HInvoke* invoke) { - CreateIntIntToVoidLocations(arena_, invoke); + CreateIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeByte(HInvoke* invoke) { @@ -2112,7 +2091,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeByte(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeIntNative(HInvoke* invoke) { - CreateIntIntToVoidLocations(arena_, invoke); + CreateIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeIntNative(HInvoke* invoke) { @@ -2120,7 +2099,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeIntNative(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeLongNative(HInvoke* invoke) { - CreateIntIntToVoidLocations(arena_, invoke); + CreateIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeLongNative(HInvoke* invoke) { @@ -2128,7 +2107,7 @@ void 
IntrinsicCodeGeneratorX86_64::VisitMemoryPokeLongNative(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeShortNative(HInvoke* invoke) { - CreateIntIntToVoidLocations(arena_, invoke); + CreateIntIntToVoidLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeShortNative(HInvoke* invoke) { @@ -2136,9 +2115,8 @@ void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeShortNative(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitThreadCurrentThread(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetOut(Location::RequiresRegister()); } @@ -2194,15 +2172,16 @@ static void GenUnsafeGet(HInvoke* invoke, } } -static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { +static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) { bool can_call = kEmitCompilerReadBarrier && (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject || invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile); - LocationSummary* locations = new (arena) LocationSummary(invoke, - (can_call - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall), - kIntrinsified); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, + can_call + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall, + kIntrinsified); if (can_call && kUseBakerReadBarrier) { locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} @@ -2214,22 +2193,22 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke } void IntrinsicLocationsBuilderX86_64::VisitUnsafeGet(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke); + CreateIntIntIntToIntLocations(allocator_, invoke); } void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke); + CreateIntIntIntToIntLocations(allocator_, invoke); } void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetLong(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke); + CreateIntIntIntToIntLocations(allocator_, invoke); } void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetLongVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke); + CreateIntIntIntToIntLocations(allocator_, invoke); } void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetObject(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke); + CreateIntIntIntToIntLocations(allocator_, invoke); } void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke); + CreateIntIntIntToIntLocations(allocator_, invoke); } @@ -2253,12 +2232,11 @@ void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) } -static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* arena, +static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* allocator, DataType::Type type, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::NoLocation()); // Unused receiver. 
locations->SetInAt(1, Location::RequiresRegister()); locations->SetInAt(2, Location::RequiresRegister()); @@ -2271,31 +2249,31 @@ static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* arena, } void IntrinsicLocationsBuilderX86_64::VisitUnsafePut(HInvoke* invoke) { - CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt32, invoke); + CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kInt32, invoke); } void IntrinsicLocationsBuilderX86_64::VisitUnsafePutOrdered(HInvoke* invoke) { - CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt32, invoke); + CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kInt32, invoke); } void IntrinsicLocationsBuilderX86_64::VisitUnsafePutVolatile(HInvoke* invoke) { - CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt32, invoke); + CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kInt32, invoke); } void IntrinsicLocationsBuilderX86_64::VisitUnsafePutObject(HInvoke* invoke) { - CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kReference, invoke); + CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kReference, invoke); } void IntrinsicLocationsBuilderX86_64::VisitUnsafePutObjectOrdered(HInvoke* invoke) { - CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kReference, invoke); + CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kReference, invoke); } void IntrinsicLocationsBuilderX86_64::VisitUnsafePutObjectVolatile(HInvoke* invoke) { - CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kReference, invoke); + CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kReference, invoke); } void IntrinsicLocationsBuilderX86_64::VisitUnsafePutLong(HInvoke* invoke) { - CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt64, invoke); + 
CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kInt64, invoke); } void IntrinsicLocationsBuilderX86_64::VisitUnsafePutLongOrdered(HInvoke* invoke) { - CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt64, invoke); + CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kInt64, invoke); } void IntrinsicLocationsBuilderX86_64::VisitUnsafePutLongVolatile(HInvoke* invoke) { - CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt64, invoke); + CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kInt64, invoke); } // We don't care for ordered: it requires an AnyStore barrier, which is already given by the x86 @@ -2363,17 +2341,18 @@ void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLongVolatile(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ true, codegen_); } -static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, +static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator, DataType::Type type, HInvoke* invoke) { bool can_call = kEmitCompilerReadBarrier && kUseBakerReadBarrier && (invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject); - LocationSummary* locations = new (arena) LocationSummary(invoke, - (can_call - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall), - kIntrinsified); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, + can_call + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall, + kIntrinsified); locations->SetInAt(0, Location::NoLocation()); // Unused receiver. 
locations->SetInAt(1, Location::RequiresRegister()); locations->SetInAt(2, Location::RequiresRegister()); @@ -2391,11 +2370,11 @@ static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, } void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASInt(HInvoke* invoke) { - CreateIntIntIntIntIntToInt(arena_, DataType::Type::kInt32, invoke); + CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kInt32, invoke); } void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASLong(HInvoke* invoke) { - CreateIntIntIntIntIntToInt(arena_, DataType::Type::kInt64, invoke); + CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kInt64, invoke); } void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASObject(HInvoke* invoke) { @@ -2405,7 +2384,7 @@ void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASObject(HInvoke* invoke) { return; } - CreateIntIntIntIntIntToInt(arena_, DataType::Type::kReference, invoke); + CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kReference, invoke); } static void GenCAS(DataType::Type type, HInvoke* invoke, CodeGeneratorX86_64* codegen) { @@ -2537,9 +2516,8 @@ void IntrinsicCodeGeneratorX86_64::VisitUnsafeCASObject(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitIntegerReverse(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::SameAsFirstInput()); locations->AddTemp(Location::RequiresRegister()); @@ -2580,9 +2558,8 @@ void IntrinsicCodeGeneratorX86_64::VisitIntegerReverse(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitLongReverse(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, 
LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::SameAsFirstInput()); locations->AddTemp(Location::RequiresRegister()); @@ -2625,15 +2602,14 @@ void IntrinsicCodeGeneratorX86_64::VisitLongReverse(HInvoke* invoke) { } static void CreateBitCountLocations( - ArenaAllocator* arena, CodeGeneratorX86_64* codegen, HInvoke* invoke) { + ArenaAllocator* allocator, CodeGeneratorX86_64* codegen, HInvoke* invoke) { if (!codegen->GetInstructionSetFeatures().HasPopCnt()) { // Do nothing if there is no popcnt support. This results in generating // a call for the intrinsic rather than direct code. return; } - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresRegister()); } @@ -2672,7 +2648,7 @@ static void GenBitCount(X86_64Assembler* assembler, } void IntrinsicLocationsBuilderX86_64::VisitIntegerBitCount(HInvoke* invoke) { - CreateBitCountLocations(arena_, codegen_, invoke); + CreateBitCountLocations(allocator_, codegen_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitIntegerBitCount(HInvoke* invoke) { @@ -2680,17 +2656,16 @@ void IntrinsicCodeGeneratorX86_64::VisitIntegerBitCount(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitLongBitCount(HInvoke* invoke) { - CreateBitCountLocations(arena_, codegen_, invoke); + CreateBitCountLocations(allocator_, codegen_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitLongBitCount(HInvoke* invoke) { GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ true); } -static void CreateOneBitLocations(ArenaAllocator* arena, HInvoke* invoke, bool is_high) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void 
CreateOneBitLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_high) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresRegister()); locations->AddTemp(is_high ? Location::RegisterLocation(RCX) // needs CL @@ -2787,7 +2762,7 @@ static void GenOneBit(X86_64Assembler* assembler, } void IntrinsicLocationsBuilderX86_64::VisitIntegerHighestOneBit(HInvoke* invoke) { - CreateOneBitLocations(arena_, invoke, /* is_high */ true); + CreateOneBitLocations(allocator_, invoke, /* is_high */ true); } void IntrinsicCodeGeneratorX86_64::VisitIntegerHighestOneBit(HInvoke* invoke) { @@ -2795,7 +2770,7 @@ void IntrinsicCodeGeneratorX86_64::VisitIntegerHighestOneBit(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitLongHighestOneBit(HInvoke* invoke) { - CreateOneBitLocations(arena_, invoke, /* is_high */ true); + CreateOneBitLocations(allocator_, invoke, /* is_high */ true); } void IntrinsicCodeGeneratorX86_64::VisitLongHighestOneBit(HInvoke* invoke) { @@ -2803,7 +2778,7 @@ void IntrinsicCodeGeneratorX86_64::VisitLongHighestOneBit(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitIntegerLowestOneBit(HInvoke* invoke) { - CreateOneBitLocations(arena_, invoke, /* is_high */ false); + CreateOneBitLocations(allocator_, invoke, /* is_high */ false); } void IntrinsicCodeGeneratorX86_64::VisitIntegerLowestOneBit(HInvoke* invoke) { @@ -2811,17 +2786,16 @@ void IntrinsicCodeGeneratorX86_64::VisitIntegerLowestOneBit(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitLongLowestOneBit(HInvoke* invoke) { - CreateOneBitLocations(arena_, invoke, /* is_high */ false); + CreateOneBitLocations(allocator_, invoke, /* is_high */ false); } void IntrinsicCodeGeneratorX86_64::VisitLongLowestOneBit(HInvoke* invoke) { GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ false, /* is_long */ true); } -static void 
CreateLeadingZeroLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateLeadingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresRegister()); } @@ -2877,7 +2851,7 @@ static void GenLeadingZeros(X86_64Assembler* assembler, } void IntrinsicLocationsBuilderX86_64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { - CreateLeadingZeroLocations(arena_, invoke); + CreateLeadingZeroLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { @@ -2885,17 +2859,16 @@ void IntrinsicCodeGeneratorX86_64::VisitIntegerNumberOfLeadingZeros(HInvoke* inv } void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { - CreateLeadingZeroLocations(arena_, invoke); + CreateLeadingZeroLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true); } -static void CreateTrailingZeroLocations(ArenaAllocator* arena, HInvoke* invoke) { - LocationSummary* locations = new (arena) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); +static void CreateTrailingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke) { + LocationSummary* locations = + new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresRegister()); } @@ -2946,7 +2919,7 @@ static void GenTrailingZeros(X86_64Assembler* assembler, } void IntrinsicLocationsBuilderX86_64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) { - CreateTrailingZeroLocations(arena_, invoke); + 
CreateTrailingZeroLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) { @@ -2954,7 +2927,7 @@ void IntrinsicCodeGeneratorX86_64::VisitIntegerNumberOfTrailingZeros(HInvoke* in } void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) { - CreateTrailingZeroLocations(arena_, invoke); + CreateTrailingZeroLocations(allocator_, invoke); } void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) { @@ -3029,9 +3002,8 @@ void IntrinsicCodeGeneratorX86_64::VisitIntegerValueOf(HInvoke* invoke) { } void IntrinsicLocationsBuilderX86_64::VisitThreadInterrupted(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); + LocationSummary* locations = + new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); locations->SetOut(Location::RequiresRegister()); } diff --git a/compiler/optimizing/intrinsics_x86_64.h b/compiler/optimizing/intrinsics_x86_64.h index 4b287886af..b0fbe91a75 100644 --- a/compiler/optimizing/intrinsics_x86_64.h +++ b/compiler/optimizing/intrinsics_x86_64.h @@ -49,7 +49,7 @@ class IntrinsicLocationsBuilderX86_64 FINAL : public IntrinsicVisitor { bool TryDispatch(HInvoke* invoke); private: - ArenaAllocator* arena_; + ArenaAllocator* allocator_; CodeGeneratorX86_64* codegen_; DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86_64); diff --git a/compiler/optimizing/licm.cc b/compiler/optimizing/licm.cc index 10524b0ae6..7af1a20f98 100644 --- a/compiler/optimizing/licm.cc +++ b/compiler/optimizing/licm.cc @@ -84,10 +84,10 @@ void LICM::Run() { // Only used during debug. 
ArenaBitVector* visited = nullptr; if (kIsDebugBuild) { - visited = new (graph_->GetArena()) ArenaBitVector(graph_->GetArena(), - graph_->GetBlocks().size(), - false, - kArenaAllocLICM); + visited = new (graph_->GetAllocator()) ArenaBitVector(graph_->GetAllocator(), + graph_->GetBlocks().size(), + false, + kArenaAllocLICM); } // Post order visit to visit inner loops before outer loops. diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc index 0617e60cfe..adc3cabe87 100644 --- a/compiler/optimizing/licm_test.cc +++ b/compiler/optimizing/licm_test.cc @@ -27,12 +27,10 @@ namespace art { /** * Fixture class for the LICM tests. */ -class LICMTest : public CommonCompilerTest { +class LICMTest : public OptimizingUnitTest { public: LICMTest() - : pool_(), - allocator_(&pool_), - entry_(nullptr), + : entry_(nullptr), loop_preheader_(nullptr), loop_header_(nullptr), loop_body_(nullptr), @@ -41,7 +39,7 @@ class LICMTest : public CommonCompilerTest { parameter_(nullptr), int_constant_(nullptr), float_constant_(nullptr) { - graph_ = CreateGraph(&allocator_); + graph_ = CreateGraph(); } ~LICMTest() { } @@ -49,12 +47,12 @@ class LICMTest : public CommonCompilerTest { // Builds a singly-nested loop structure in CFG. Tests can further populate // the basic blocks with instructions to set up interesting scenarios. 
void BuildLoop() { - entry_ = new (&allocator_) HBasicBlock(graph_); - loop_preheader_ = new (&allocator_) HBasicBlock(graph_); - loop_header_ = new (&allocator_) HBasicBlock(graph_); - loop_body_ = new (&allocator_) HBasicBlock(graph_); - return_ = new (&allocator_) HBasicBlock(graph_); - exit_ = new (&allocator_) HBasicBlock(graph_); + entry_ = new (GetAllocator()) HBasicBlock(graph_); + loop_preheader_ = new (GetAllocator()) HBasicBlock(graph_); + loop_header_ = new (GetAllocator()) HBasicBlock(graph_); + loop_body_ = new (GetAllocator()) HBasicBlock(graph_); + return_ = new (GetAllocator()) HBasicBlock(graph_); + exit_ = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(entry_); graph_->AddBlock(loop_preheader_); @@ -75,18 +73,18 @@ class LICMTest : public CommonCompilerTest { return_->AddSuccessor(exit_); // Provide boiler-plate instructions. - parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(), - dex::TypeIndex(0), - 0, - DataType::Type::kReference); + parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(), + dex::TypeIndex(0), + 0, + DataType::Type::kReference); entry_->AddInstruction(parameter_); int_constant_ = graph_->GetIntConstant(42); float_constant_ = graph_->GetFloatConstant(42.0f); - loop_preheader_->AddInstruction(new (&allocator_) HGoto()); - loop_header_->AddInstruction(new (&allocator_) HIf(parameter_)); - loop_body_->AddInstruction(new (&allocator_) HGoto()); - return_->AddInstruction(new (&allocator_) HReturnVoid()); - exit_->AddInstruction(new (&allocator_) HExit()); + loop_preheader_->AddInstruction(new (GetAllocator()) HGoto()); + loop_header_->AddInstruction(new (GetAllocator()) HIf(parameter_)); + loop_body_->AddInstruction(new (GetAllocator()) HGoto()); + return_->AddInstruction(new (GetAllocator()) HReturnVoid()); + exit_->AddInstruction(new (GetAllocator()) HExit()); } // Performs LICM optimizations (after proper set up). 
@@ -98,8 +96,6 @@ class LICMTest : public CommonCompilerTest { } // General building fields. - ArenaPool pool_; - ArenaAllocator allocator_; HGraph* graph_; // Specific basic blocks. @@ -123,17 +119,17 @@ TEST_F(LICMTest, FieldHoisting) { BuildLoop(); // Populate the loop with instructions: set/get field with different types. - HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_, - nullptr, - DataType::Type::kInt64, - MemberOffset(10), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph_->GetDexFile(), - 0); + HInstruction* get_field = new (GetAllocator()) HInstanceFieldGet(parameter_, + nullptr, + DataType::Type::kInt64, + MemberOffset(10), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph_->GetDexFile(), + 0); loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction()); - HInstruction* set_field = new (&allocator_) HInstanceFieldSet( + HInstruction* set_field = new (GetAllocator()) HInstanceFieldSet( parameter_, int_constant_, nullptr, DataType::Type::kInt32, MemberOffset(20), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), 0); loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction()); @@ -150,26 +146,26 @@ TEST_F(LICMTest, NoFieldHoisting) { // Populate the loop with instructions: set/get field with same types. 
ScopedNullHandle<mirror::DexCache> dex_cache; - HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_, - nullptr, - DataType::Type::kInt64, - MemberOffset(10), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph_->GetDexFile(), - 0); + HInstruction* get_field = new (GetAllocator()) HInstanceFieldGet(parameter_, + nullptr, + DataType::Type::kInt64, + MemberOffset(10), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph_->GetDexFile(), + 0); loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction()); - HInstruction* set_field = new (&allocator_) HInstanceFieldSet(parameter_, - get_field, - nullptr, - DataType::Type::kInt64, - MemberOffset(10), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph_->GetDexFile(), - 0); + HInstruction* set_field = new (GetAllocator()) HInstanceFieldSet(parameter_, + get_field, + nullptr, + DataType::Type::kInt64, + MemberOffset(10), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph_->GetDexFile(), + 0); loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction()); EXPECT_EQ(get_field->GetBlock(), loop_body_); @@ -183,10 +179,10 @@ TEST_F(LICMTest, ArrayHoisting) { BuildLoop(); // Populate the loop with instructions: set/get array with different types. - HInstruction* get_array = new (&allocator_) HArrayGet( + HInstruction* get_array = new (GetAllocator()) HArrayGet( parameter_, int_constant_, DataType::Type::kInt32, 0); loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction()); - HInstruction* set_array = new (&allocator_) HArraySet( + HInstruction* set_array = new (GetAllocator()) HArraySet( parameter_, int_constant_, float_constant_, DataType::Type::kFloat32, 0); loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction()); @@ -201,10 +197,10 @@ TEST_F(LICMTest, NoArrayHoisting) { BuildLoop(); // Populate the loop with instructions: set/get array with same types. 
- HInstruction* get_array = new (&allocator_) HArrayGet( + HInstruction* get_array = new (GetAllocator()) HArrayGet( parameter_, int_constant_, DataType::Type::kFloat32, 0); loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction()); - HInstruction* set_array = new (&allocator_) HArraySet( + HInstruction* set_array = new (GetAllocator()) HArraySet( parameter_, get_array, float_constant_, DataType::Type::kFloat32, 0); loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction()); diff --git a/compiler/optimizing/linear_order.cc b/compiler/optimizing/linear_order.cc index 80cecd41dc..58e00a810d 100644 --- a/compiler/optimizing/linear_order.cc +++ b/compiler/optimizing/linear_order.cc @@ -16,6 +16,9 @@ #include "linear_order.h" +#include "base/scoped_arena_allocator.h" +#include "base/scoped_arena_containers.h" + namespace art { static bool InSameLoop(HLoopInformation* first_loop, HLoopInformation* second_loop) { @@ -34,7 +37,8 @@ static bool IsInnerLoop(HLoopInformation* outer, HLoopInformation* inner) { } // Helper method to update work list for linear order. -static void AddToListForLinearization(ArenaVector<HBasicBlock*>* worklist, HBasicBlock* block) { +static void AddToListForLinearization(ScopedArenaVector<HBasicBlock*>* worklist, + HBasicBlock* block) { HLoopInformation* block_loop = block->GetLoopInformation(); auto insert_pos = worklist->rbegin(); // insert_pos.base() will be the actual position. for (auto end = worklist->rend(); insert_pos != end; ++insert_pos) { @@ -51,7 +55,7 @@ static void AddToListForLinearization(ArenaVector<HBasicBlock*>* worklist, HBasi } // Helper method to validate linear order. 
-static bool IsLinearOrderWellFormed(const HGraph* graph, ArenaVector<HBasicBlock*>* linear_order) { +static bool IsLinearOrderWellFormed(const HGraph* graph, ArrayRef<HBasicBlock*> linear_order) { for (HBasicBlock* header : graph->GetBlocks()) { if (header == nullptr || !header->IsLoopHeader()) { continue; @@ -59,7 +63,7 @@ static bool IsLinearOrderWellFormed(const HGraph* graph, ArenaVector<HBasicBlock HLoopInformation* loop = header->GetLoopInformation(); size_t num_blocks = loop->GetBlocks().NumSetBits(); size_t found_blocks = 0u; - for (HBasicBlock* block : *linear_order) { + for (HBasicBlock* block : linear_order) { if (loop->Contains(*block)) { found_blocks++; if (found_blocks == 1u && block != header) { @@ -79,10 +83,8 @@ static bool IsLinearOrderWellFormed(const HGraph* graph, ArenaVector<HBasicBlock return true; } -void LinearizeGraph(const HGraph* graph, - ArenaAllocator* allocator, - ArenaVector<HBasicBlock*>* linear_order) { - DCHECK(linear_order->empty()); +void LinearizeGraphInternal(const HGraph* graph, ArrayRef<HBasicBlock*> linear_order) { + DCHECK_EQ(linear_order.size(), graph->GetReversePostOrder().size()); // Create a reverse post ordering with the following properties: // - Blocks in a loop are consecutive, // - Back-edge is the last block before loop exits. @@ -92,8 +94,9 @@ void LinearizeGraph(const HGraph* graph, // current reverse post order in the graph, but it would require making // order queries to a GrowableArray, which is not the best data structure // for it. 
- ArenaVector<uint32_t> forward_predecessors(graph->GetBlocks().size(), - allocator->Adapter(kArenaAllocLinearOrder)); + ScopedArenaAllocator allocator(graph->GetArenaStack()); + ScopedArenaVector<uint32_t> forward_predecessors(graph->GetBlocks().size(), + allocator.Adapter(kArenaAllocLinearOrder)); for (HBasicBlock* block : graph->GetReversePostOrder()) { size_t number_of_forward_predecessors = block->GetPredecessors().size(); if (block->IsLoopHeader()) { @@ -105,13 +108,14 @@ void LinearizeGraph(const HGraph* graph, // iterate over the successors. When all non-back edge predecessors of a // successor block are visited, the successor block is added in the worklist // following an order that satisfies the requirements to build our linear graph. - linear_order->reserve(graph->GetReversePostOrder().size()); - ArenaVector<HBasicBlock*> worklist(allocator->Adapter(kArenaAllocLinearOrder)); + ScopedArenaVector<HBasicBlock*> worklist(allocator.Adapter(kArenaAllocLinearOrder)); worklist.push_back(graph->GetEntryBlock()); + size_t num_added = 0u; do { HBasicBlock* current = worklist.back(); worklist.pop_back(); - linear_order->push_back(current); + linear_order[num_added] = current; + ++num_added; for (HBasicBlock* successor : current->GetSuccessors()) { int block_id = successor->GetBlockId(); size_t number_of_remaining_predecessors = forward_predecessors[block_id]; @@ -121,6 +125,7 @@ void LinearizeGraph(const HGraph* graph, forward_predecessors[block_id] = number_of_remaining_predecessors - 1; } } while (!worklist.empty()); + DCHECK_EQ(num_added, linear_order.size()); DCHECK(graph->HasIrreducibleLoops() || IsLinearOrderWellFormed(graph, linear_order)); } diff --git a/compiler/optimizing/linear_order.h b/compiler/optimizing/linear_order.h index 7122d67be9..151db001e1 100644 --- a/compiler/optimizing/linear_order.h +++ b/compiler/optimizing/linear_order.h @@ -17,10 +17,14 @@ #ifndef ART_COMPILER_OPTIMIZING_LINEAR_ORDER_H_ #define ART_COMPILER_OPTIMIZING_LINEAR_ORDER_H_ 
+#include <type_traits> + #include "nodes.h" namespace art { +void LinearizeGraphInternal(const HGraph* graph, ArrayRef<HBasicBlock*> linear_order); + // Linearizes the 'graph' such that: // (1): a block is always after its dominator, // (2): blocks of loops are contiguous. @@ -32,9 +36,15 @@ namespace art { // // for (HBasicBlock* block : ReverseRange(linear_order)) // linear post order // -void LinearizeGraph(const HGraph* graph, - ArenaAllocator* allocator, - ArenaVector<HBasicBlock*>* linear_order); +template <typename Vector> +void LinearizeGraph(const HGraph* graph, Vector* linear_order) { + static_assert(std::is_same<HBasicBlock*, typename Vector::value_type>::value, + "Vector::value_type must be HBasicBlock*."); + // Resize the vector and pass an ArrayRef<> to internal implementation which is shared + // for all kinds of vectors, i.e. ArenaVector<> or ScopedArenaVector<>. + linear_order->resize(graph->GetReversePostOrder().size()); + LinearizeGraphInternal(graph, ArrayRef<HBasicBlock*>(*linear_order)); +} } // namespace art diff --git a/compiler/optimizing/linearize_test.cc b/compiler/optimizing/linearize_test.cc index 3831aa6c91..b2a9c0a8e7 100644 --- a/compiler/optimizing/linearize_test.cc +++ b/compiler/optimizing/linearize_test.cc @@ -32,17 +32,20 @@ namespace art { -class LinearizeTest : public CommonCompilerTest {}; +class LinearizeTest : public OptimizingUnitTest { + protected: + template <size_t number_of_blocks> + void TestCode(const uint16_t* data, const uint32_t (&expected_order)[number_of_blocks]); +}; template <size_t number_of_blocks> -static void TestCode(const uint16_t* data, const uint32_t (&expected_order)[number_of_blocks]) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = CreateCFG(&allocator, data); +void LinearizeTest::TestCode(const uint16_t* data, + const uint32_t (&expected_order)[number_of_blocks]) { + HGraph* graph = CreateCFG(data); std::unique_ptr<const X86InstructionSetFeatures> features_x86( 
X86InstructionSetFeatures::FromCppDefines()); x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions()); - SsaLivenessAnalysis liveness(graph, &codegen); + SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator()); liveness.Analyze(); ASSERT_EQ(graph->GetLinearOrder().size(), number_of_blocks); diff --git a/compiler/optimizing/live_interval_test.cc b/compiler/optimizing/live_interval_test.cc index 405f261986..c60386d7b7 100644 --- a/compiler/optimizing/live_interval_test.cc +++ b/compiler/optimizing/live_interval_test.cc @@ -23,29 +23,29 @@ namespace art { TEST(LiveInterval, GetStart) { - ArenaPool pool; - ArenaAllocator allocator(&pool); + ArenaPoolAndAllocator pool; + ScopedArenaAllocator* allocator = pool.GetScopedAllocator(); { static constexpr size_t ranges[][2] = {{0, 42}}; - LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator); + LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator); ASSERT_EQ(0u, interval->GetStart()); } { static constexpr size_t ranges[][2] = {{4, 12}, {14, 16}}; - LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator); + LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator); ASSERT_EQ(4u, interval->GetStart()); } } TEST(LiveInterval, IsDeadAt) { - ArenaPool pool; - ArenaAllocator allocator(&pool); + ArenaPoolAndAllocator pool; + ScopedArenaAllocator* allocator = pool.GetScopedAllocator(); { static constexpr size_t ranges[][2] = {{0, 42}}; - LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator); + LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator); ASSERT_TRUE(interval->IsDeadAt(42)); ASSERT_TRUE(interval->IsDeadAt(43)); ASSERT_FALSE(interval->IsDeadAt(41)); @@ -55,7 +55,7 @@ TEST(LiveInterval, IsDeadAt) { { static constexpr size_t ranges[][2] = {{4, 12}, {14, 16}}; - LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator); + LiveInterval* interval 
= BuildInterval(ranges, arraysize(ranges), allocator); ASSERT_TRUE(interval->IsDeadAt(16)); ASSERT_TRUE(interval->IsDeadAt(32)); ASSERT_FALSE(interval->IsDeadAt(0)); @@ -68,12 +68,12 @@ TEST(LiveInterval, IsDeadAt) { } TEST(LiveInterval, Covers) { - ArenaPool pool; - ArenaAllocator allocator(&pool); + ArenaPoolAndAllocator pool; + ScopedArenaAllocator* allocator = pool.GetScopedAllocator(); { static constexpr size_t ranges[][2] = {{0, 42}}; - LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator); + LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator); ASSERT_TRUE(interval->Covers(0)); ASSERT_TRUE(interval->Covers(4)); ASSERT_TRUE(interval->Covers(41)); @@ -83,7 +83,7 @@ TEST(LiveInterval, Covers) { { static constexpr size_t ranges[][2] = {{4, 12}, {14, 16}}; - LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator); + LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator); ASSERT_FALSE(interval->Covers(0)); ASSERT_TRUE(interval->Covers(4)); ASSERT_TRUE(interval->Covers(11)); @@ -96,68 +96,68 @@ TEST(LiveInterval, Covers) { } TEST(LiveInterval, FirstIntersectionWith) { - ArenaPool pool; - ArenaAllocator allocator(&pool); + ArenaPoolAndAllocator pool; + ScopedArenaAllocator* allocator = pool.GetScopedAllocator(); { static constexpr size_t ranges1[][2] = {{0, 4}, {8, 10}}; - LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator); + LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator); static constexpr size_t ranges2[][2] = {{5, 6}}; - LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator); + LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator); ASSERT_EQ(kNoLifetime, interval1->FirstIntersectionWith(interval2)); } { static constexpr size_t ranges1[][2] = {{0, 4}, {8, 10}}; - LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator); + LiveInterval* 
interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator); static constexpr size_t ranges2[][2] = {{5, 42}}; - LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator); + LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator); ASSERT_EQ(8u, interval1->FirstIntersectionWith(interval2)); } { static constexpr size_t ranges1[][2] = {{0, 4}, {8, 10}}; - LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator); + LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator); static constexpr size_t ranges2[][2] = {{5, 6}, {7, 8}, {11, 12}}; - LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator); + LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator); ASSERT_EQ(kNoLifetime, interval1->FirstIntersectionWith(interval2)); } { static constexpr size_t ranges1[][2] = {{0, 4}, {8, 10}}; - LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator); + LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator); static constexpr size_t ranges2[][2] = {{5, 6}, {7, 8}, {9, 10}}; - LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator); + LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator); ASSERT_EQ(9u, interval1->FirstIntersectionWith(interval2)); } { static constexpr size_t ranges1[][2] = {{0, 1}, {2, 7}, {8, 10}}; - LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator); + LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator); static constexpr size_t ranges2[][2] = {{1, 2}, {6, 7}, {9, 10}}; - LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator); + LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator); ASSERT_EQ(6u, interval1->FirstIntersectionWith(interval2)); } { static constexpr size_t ranges1[][2] = {{0, 1}, {2, 8}, {55, 58}}; 
- LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator); + LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator); static constexpr size_t ranges2[][2] = {{1, 2}, {11, 42}, {43, 48}, {54, 56}}; - LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator); + LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator); ASSERT_EQ(55u, interval1->FirstIntersectionWith(interval2)); } { static constexpr size_t ranges1[][2] = {{0, 1}, {2, 8}, {15, 18}, {27, 32}, {41, 53}, {54, 60}}; - LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator); + LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator); static constexpr size_t ranges2[][2] = {{1, 2}, {11, 12}, {19, 25}, {34, 42}, {52, 60}}; - LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator); + LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator); ASSERT_EQ(41u, interval1->FirstIntersectionWith(interval2)); } @@ -188,13 +188,13 @@ static bool RangesEquals(LiveInterval* interval, } TEST(LiveInterval, SplitAt) { - ArenaPool pool; - ArenaAllocator allocator(&pool); + ArenaPoolAndAllocator pool; + ScopedArenaAllocator* allocator = pool.GetScopedAllocator(); { // Test within one range. static constexpr size_t ranges[][2] = {{0, 4}}; - LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator); + LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator); LiveInterval* split = interval->SplitAt(1); static constexpr size_t expected[][2] = {{0, 1}}; ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected))); @@ -205,7 +205,7 @@ TEST(LiveInterval, SplitAt) { { // Test just before the end of one range. 
static constexpr size_t ranges[][2] = {{0, 4}}; - LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator); + LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator); LiveInterval* split = interval->SplitAt(3); static constexpr size_t expected[][2] = {{0, 3}}; ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected))); @@ -216,7 +216,7 @@ TEST(LiveInterval, SplitAt) { { // Test withing the first range. static constexpr size_t ranges[][2] = {{0, 4}, {8, 12}}; - LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator); + LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator); LiveInterval* split = interval->SplitAt(1); static constexpr size_t expected[][2] = {{0, 1}}; ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected))); @@ -227,7 +227,7 @@ TEST(LiveInterval, SplitAt) { { // Test in a hole. static constexpr size_t ranges[][2] = {{0, 4}, {8, 12}}; - LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator); + LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator); LiveInterval* split = interval->SplitAt(5); static constexpr size_t expected[][2] = {{0, 4}}; ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected))); @@ -238,7 +238,7 @@ TEST(LiveInterval, SplitAt) { { // Test withing the second range. static constexpr size_t ranges[][2] = {{0, 4}, {8, 12}}; - LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator); + LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator); LiveInterval* split = interval->SplitAt(9); static constexpr size_t expected[][2] = {{0, 4}, {8, 9}}; ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected))); @@ -249,7 +249,7 @@ TEST(LiveInterval, SplitAt) { { // Test at the beginning of the second range. 
static constexpr size_t ranges[][2] = {{0, 4}, {6, 10}}; - LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator); + LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator); LiveInterval* split = interval->SplitAt(6); static constexpr size_t expected[][2] = {{0, 4}}; ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected))); @@ -260,7 +260,7 @@ TEST(LiveInterval, SplitAt) { { // Test at the end of the first range. static constexpr size_t ranges[][2] = {{0, 4}, {6, 10}}; - LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator); + LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator); LiveInterval* split = interval->SplitAt(4); static constexpr size_t expected[][2] = {{0, 4}}; ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected))); @@ -271,7 +271,7 @@ TEST(LiveInterval, SplitAt) { { // Test that we get null if we split at a position where the interval is dead. static constexpr size_t ranges[][2] = {{0, 4}}; - LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator); + LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator); LiveInterval* split = interval->SplitAt(5); ASSERT_TRUE(split == nullptr); ASSERT_TRUE(RangesEquals(interval, ranges, arraysize(ranges))); @@ -279,13 +279,13 @@ TEST(LiveInterval, SplitAt) { } TEST(LiveInterval, AddLoopRange) { - ArenaPool pool; - ArenaAllocator allocator(&pool); + ArenaPoolAndAllocator pool; + ScopedArenaAllocator* allocator = pool.GetScopedAllocator(); { // Test when only used in a loop. 
static constexpr size_t ranges[][2] = {{0, 4}}; - LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator); + LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator); interval->AddLoopRange(0, 8); LiveRange* range = interval->GetFirstRange(); ASSERT_TRUE(range->GetNext() == nullptr); @@ -296,7 +296,7 @@ TEST(LiveInterval, AddLoopRange) { { // Test when only used in a loop. static constexpr size_t ranges[][2] = {{2, 4}}; - LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator); + LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator); interval->AddLoopRange(0, 8); LiveRange* range = interval->GetFirstRange(); ASSERT_TRUE(range->GetNext() == nullptr); @@ -307,7 +307,7 @@ TEST(LiveInterval, AddLoopRange) { { // Test when used just after the loop. static constexpr size_t ranges[][2] = {{2, 4}, {8, 10}}; - LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator); + LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator); interval->AddLoopRange(0, 8); LiveRange* range = interval->GetFirstRange(); ASSERT_TRUE(range->GetNext() == nullptr); @@ -318,7 +318,7 @@ TEST(LiveInterval, AddLoopRange) { { // Test when use after the loop is after a lifetime hole. 
static constexpr size_t ranges[][2] = {{2, 4}, {10, 12}}; - LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator); + LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator); interval->AddLoopRange(0, 8); LiveRange* range = interval->GetFirstRange(); ASSERT_EQ(range->GetStart(), 0u); diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc index f9a955fb0a..ddcad5aed8 100644 --- a/compiler/optimizing/live_ranges_test.cc +++ b/compiler/optimizing/live_ranges_test.cc @@ -29,10 +29,13 @@ namespace art { -class LiveRangesTest : public CommonCompilerTest {}; +class LiveRangesTest : public OptimizingUnitTest { + public: + HGraph* BuildGraph(const uint16_t* data); +}; -static HGraph* BuildGraph(const uint16_t* data, ArenaAllocator* allocator) { - HGraph* graph = CreateCFG(allocator, data); +HGraph* LiveRangesTest::BuildGraph(const uint16_t* data) { + HGraph* graph = CreateCFG(data); // Suspend checks implementation may change in the future, and this test relies // on how instructions are ordered. 
RemoveSuspendChecks(graph); @@ -58,14 +61,12 @@ TEST_F(LiveRangesTest, CFG1) { Instruction::CONST_4 | 0 | 0, Instruction::RETURN); - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = BuildGraph(data, &allocator); + HGraph* graph = BuildGraph(data); std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions()); - SsaLivenessAnalysis liveness(graph, &codegen); + SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator()); liveness.Analyze(); LiveInterval* interval = liveness.GetInstructionFromSsaIndex(0)->GetLiveInterval(); @@ -107,13 +108,11 @@ TEST_F(LiveRangesTest, CFG2) { Instruction::GOTO | 0x100, Instruction::RETURN | 0 << 8); - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = BuildGraph(data, &allocator); + HGraph* graph = BuildGraph(data); std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions()); - SsaLivenessAnalysis liveness(graph, &codegen); + SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator()); liveness.Analyze(); LiveInterval* interval = liveness.GetInstructionFromSsaIndex(0)->GetLiveInterval(); @@ -158,13 +157,11 @@ TEST_F(LiveRangesTest, CFG3) { Instruction::CONST_4 | 4 << 12 | 0, Instruction::RETURN | 0 << 8); - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = BuildGraph(data, &allocator); + HGraph* graph = BuildGraph(data); std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions()); - SsaLivenessAnalysis liveness(graph, &codegen); + SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator()); liveness.Analyze(); // Test for the 4 constant. 
@@ -236,14 +233,12 @@ TEST_F(LiveRangesTest, Loop1) { Instruction::CONST_4 | 5 << 12 | 1 << 8, Instruction::RETURN | 1 << 8); - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = BuildGraph(data, &allocator); + HGraph* graph = BuildGraph(data); RemoveSuspendChecks(graph); std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions()); - SsaLivenessAnalysis liveness(graph, &codegen); + SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator()); liveness.Analyze(); // Test for the 0 constant. @@ -316,13 +311,11 @@ TEST_F(LiveRangesTest, Loop2) { Instruction::GOTO | 0xFB00, Instruction::RETURN | 0 << 8); - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = BuildGraph(data, &allocator); + HGraph* graph = BuildGraph(data); std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions()); - SsaLivenessAnalysis liveness(graph, &codegen); + SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator()); liveness.Analyze(); // Test for the 0 constant. @@ -394,13 +387,11 @@ TEST_F(LiveRangesTest, CFG4) { Instruction::ADD_INT, 1 << 8, Instruction::RETURN); - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = BuildGraph(data, &allocator); + HGraph* graph = BuildGraph(data); std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions()); - SsaLivenessAnalysis liveness(graph, &codegen); + SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator()); liveness.Analyze(); // Test for the 0 constant. 
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc index 37b58ded59..3eadc8f8fc 100644 --- a/compiler/optimizing/liveness_test.cc +++ b/compiler/optimizing/liveness_test.cc @@ -29,7 +29,10 @@ namespace art { -class LivenessTest : public CommonCompilerTest {}; +class LivenessTest : public OptimizingUnitTest { + protected: + void TestCode(const uint16_t* data, const char* expected); +}; static void DumpBitVector(BitVector* vector, std::ostream& buffer, @@ -43,16 +46,14 @@ static void DumpBitVector(BitVector* vector, buffer << ")\n"; } -static void TestCode(const uint16_t* data, const char* expected) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = CreateCFG(&allocator, data); +void LivenessTest::TestCode(const uint16_t* data, const char* expected) { + HGraph* graph = CreateCFG(data); // `Inline` conditions into ifs. PrepareForRegisterAllocation(graph).Run(); std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions()); - SsaLivenessAnalysis liveness(graph, &codegen); + SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator()); liveness.Analyze(); std::ostringstream buffer; diff --git a/compiler/optimizing/load_store_analysis.h b/compiler/optimizing/load_store_analysis.h index d46b904c9e..6a25da3cfd 100644 --- a/compiler/optimizing/load_store_analysis.h +++ b/compiler/optimizing/load_store_analysis.h @@ -172,9 +172,9 @@ class HeapLocationCollector : public HGraphVisitor { explicit HeapLocationCollector(HGraph* graph) : HGraphVisitor(graph), - ref_info_array_(graph->GetArena()->Adapter(kArenaAllocLSE)), - heap_locations_(graph->GetArena()->Adapter(kArenaAllocLSE)), - aliasing_matrix_(graph->GetArena(), + ref_info_array_(graph->GetAllocator()->Adapter(kArenaAllocLSE)), + heap_locations_(graph->GetAllocator()->Adapter(kArenaAllocLSE)), + aliasing_matrix_(graph->GetAllocator(), 
kInitialAliasingMatrixBitVectorSize, true, kArenaAllocLSE), @@ -362,7 +362,7 @@ class HeapLocationCollector : public HGraphVisitor { ReferenceInfo* ref_info = FindReferenceInfoOf(instruction); if (ref_info == nullptr) { size_t pos = ref_info_array_.size(); - ref_info = new (GetGraph()->GetArena()) ReferenceInfo(instruction, pos); + ref_info = new (GetGraph()->GetAllocator()) ReferenceInfo(instruction, pos); ref_info_array_.push_back(ref_info); } return ref_info; @@ -385,7 +385,7 @@ class HeapLocationCollector : public HGraphVisitor { size_t heap_location_idx = FindHeapLocationIndex( ref_info, offset, index, declaring_class_def_index); if (heap_location_idx == kHeapLocationNotFound) { - HeapLocation* heap_loc = new (GetGraph()->GetArena()) + HeapLocation* heap_loc = new (GetGraph()->GetAllocator()) HeapLocation(ref_info, offset, index, declaring_class_def_index); heap_locations_.push_back(heap_loc); return heap_loc; diff --git a/compiler/optimizing/load_store_analysis_test.cc b/compiler/optimizing/load_store_analysis_test.cc index 0df2f27e82..86696d02a1 100644 --- a/compiler/optimizing/load_store_analysis_test.cc +++ b/compiler/optimizing/load_store_analysis_test.cc @@ -22,19 +22,15 @@ namespace art { -class LoadStoreAnalysisTest : public CommonCompilerTest { +class LoadStoreAnalysisTest : public OptimizingUnitTest { public: - LoadStoreAnalysisTest() : pool_(), allocator_(&pool_) { - graph_ = CreateGraph(&allocator_); - } + LoadStoreAnalysisTest() : graph_(CreateGraph()) { } - ArenaPool pool_; - ArenaAllocator allocator_; HGraph* graph_; }; TEST_F(LoadStoreAnalysisTest, ArrayHeapLocations) { - HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(entry); graph_->SetEntryBlock(entry); @@ -48,18 +44,19 @@ TEST_F(LoadStoreAnalysisTest, ArrayHeapLocations) { // array_get2 ArrayGet [array, c2] // array_set1 ArraySet [array, c1, c3] // array_set2 ArraySet [array, index, c3] - 
HInstruction* array = new (&allocator_) HParameterValue( + HInstruction* array = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); - HInstruction* index = new (&allocator_) HParameterValue( + HInstruction* index = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32); HInstruction* c1 = graph_->GetIntConstant(1); HInstruction* c2 = graph_->GetIntConstant(2); HInstruction* c3 = graph_->GetIntConstant(3); - HInstruction* array_get1 = new (&allocator_) HArrayGet(array, c1, DataType::Type::kInt32, 0); - HInstruction* array_get2 = new (&allocator_) HArrayGet(array, c2, DataType::Type::kInt32, 0); - HInstruction* array_set1 = new (&allocator_) HArraySet(array, c1, c3, DataType::Type::kInt32, 0); + HInstruction* array_get1 = new (GetAllocator()) HArrayGet(array, c1, DataType::Type::kInt32, 0); + HInstruction* array_get2 = new (GetAllocator()) HArrayGet(array, c2, DataType::Type::kInt32, 0); + HInstruction* array_set1 = + new (GetAllocator()) HArraySet(array, c1, c3, DataType::Type::kInt32, 0); HInstruction* array_set2 = - new (&allocator_) HArraySet(array, index, c3, DataType::Type::kInt32, 0); + new (GetAllocator()) HArraySet(array, index, c3, DataType::Type::kInt32, 0); entry->AddInstruction(array); entry->AddInstruction(index); entry->AddInstruction(array_get1); @@ -107,7 +104,7 @@ TEST_F(LoadStoreAnalysisTest, ArrayHeapLocations) { } TEST_F(LoadStoreAnalysisTest, FieldHeapLocations) { - HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(entry); graph_->SetEntryBlock(entry); @@ -119,38 +116,38 @@ TEST_F(LoadStoreAnalysisTest, FieldHeapLocations) { // get_field20 InstanceFieldGet [object, 20] HInstruction* c1 = graph_->GetIntConstant(1); - HInstruction* object = new (&allocator_) HParameterValue(graph_->GetDexFile(), - dex::TypeIndex(0), - 0, - 
DataType::Type::kReference); - HInstanceFieldSet* set_field10 = new (&allocator_) HInstanceFieldSet(object, - c1, - nullptr, - DataType::Type::kInt32, - MemberOffset(10), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph_->GetDexFile(), - 0); - HInstanceFieldGet* get_field10 = new (&allocator_) HInstanceFieldGet(object, - nullptr, - DataType::Type::kInt32, - MemberOffset(10), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph_->GetDexFile(), - 0); - HInstanceFieldGet* get_field20 = new (&allocator_) HInstanceFieldGet(object, - nullptr, - DataType::Type::kInt32, - MemberOffset(20), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph_->GetDexFile(), - 0); + HInstruction* object = new (GetAllocator()) HParameterValue(graph_->GetDexFile(), + dex::TypeIndex(0), + 0, + DataType::Type::kReference); + HInstanceFieldSet* set_field10 = new (GetAllocator()) HInstanceFieldSet(object, + c1, + nullptr, + DataType::Type::kInt32, + MemberOffset(10), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph_->GetDexFile(), + 0); + HInstanceFieldGet* get_field10 = new (GetAllocator()) HInstanceFieldGet(object, + nullptr, + DataType::Type::kInt32, + MemberOffset(10), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph_->GetDexFile(), + 0); + HInstanceFieldGet* get_field20 = new (GetAllocator()) HInstanceFieldGet(object, + nullptr, + DataType::Type::kInt32, + MemberOffset(20), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph_->GetDexFile(), + 0); entry->AddInstruction(object); entry->AddInstruction(set_field10); entry->AddInstruction(get_field10); @@ -186,34 +183,38 @@ TEST_F(LoadStoreAnalysisTest, FieldHeapLocations) { } TEST_F(LoadStoreAnalysisTest, ArrayIndexAliasingTest) { - HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(entry); graph_->SetEntryBlock(entry); graph_->BuildDominatorTree(); - HInstruction* array = new 
(&allocator_) HParameterValue( + HInstruction* array = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); - HInstruction* index = new (&allocator_) HParameterValue( + HInstruction* index = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32); HInstruction* c0 = graph_->GetIntConstant(0); HInstruction* c1 = graph_->GetIntConstant(1); HInstruction* c_neg1 = graph_->GetIntConstant(-1); - HInstruction* add0 = new (&allocator_) HAdd(DataType::Type::kInt32, index, c0); - HInstruction* add1 = new (&allocator_) HAdd(DataType::Type::kInt32, index, c1); - HInstruction* sub0 = new (&allocator_) HSub(DataType::Type::kInt32, index, c0); - HInstruction* sub1 = new (&allocator_) HSub(DataType::Type::kInt32, index, c1); - HInstruction* sub_neg1 = new (&allocator_) HSub(DataType::Type::kInt32, index, c_neg1); - HInstruction* rev_sub1 = new (&allocator_) HSub(DataType::Type::kInt32, c1, index); - HInstruction* arr_set1 = new (&allocator_) HArraySet(array, c0, c0, DataType::Type::kInt32, 0); - HInstruction* arr_set2 = new (&allocator_) HArraySet(array, c1, c0, DataType::Type::kInt32, 0); - HInstruction* arr_set3 = new (&allocator_) HArraySet(array, add0, c0, DataType::Type::kInt32, 0); - HInstruction* arr_set4 = new (&allocator_) HArraySet(array, add1, c0, DataType::Type::kInt32, 0); - HInstruction* arr_set5 = new (&allocator_) HArraySet(array, sub0, c0, DataType::Type::kInt32, 0); - HInstruction* arr_set6 = new (&allocator_) HArraySet(array, sub1, c0, DataType::Type::kInt32, 0); + HInstruction* add0 = new (GetAllocator()) HAdd(DataType::Type::kInt32, index, c0); + HInstruction* add1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, index, c1); + HInstruction* sub0 = new (GetAllocator()) HSub(DataType::Type::kInt32, index, c0); + HInstruction* sub1 = new (GetAllocator()) HSub(DataType::Type::kInt32, index, c1); + HInstruction* sub_neg1 = new (GetAllocator()) 
HSub(DataType::Type::kInt32, index, c_neg1); + HInstruction* rev_sub1 = new (GetAllocator()) HSub(DataType::Type::kInt32, c1, index); + HInstruction* arr_set1 = new (GetAllocator()) HArraySet(array, c0, c0, DataType::Type::kInt32, 0); + HInstruction* arr_set2 = new (GetAllocator()) HArraySet(array, c1, c0, DataType::Type::kInt32, 0); + HInstruction* arr_set3 = + new (GetAllocator()) HArraySet(array, add0, c0, DataType::Type::kInt32, 0); + HInstruction* arr_set4 = + new (GetAllocator()) HArraySet(array, add1, c0, DataType::Type::kInt32, 0); + HInstruction* arr_set5 = + new (GetAllocator()) HArraySet(array, sub0, c0, DataType::Type::kInt32, 0); + HInstruction* arr_set6 = + new (GetAllocator()) HArraySet(array, sub1, c0, DataType::Type::kInt32, 0); HInstruction* arr_set7 = - new (&allocator_) HArraySet(array, rev_sub1, c0, DataType::Type::kInt32, 0); + new (GetAllocator()) HArraySet(array, rev_sub1, c0, DataType::Type::kInt32, 0); HInstruction* arr_set8 = - new (&allocator_) HArraySet(array, sub_neg1, c0, DataType::Type::kInt32, 0); + new (GetAllocator()) HArraySet(array, sub_neg1, c0, DataType::Type::kInt32, 0); entry->AddInstruction(array); entry->AddInstruction(index); @@ -272,14 +273,14 @@ TEST_F(LoadStoreAnalysisTest, ArrayIndexAliasingTest) { } TEST_F(LoadStoreAnalysisTest, ArrayIndexCalculationOverflowTest) { - HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(entry); graph_->SetEntryBlock(entry); graph_->BuildDominatorTree(); - HInstruction* array = new (&allocator_) HParameterValue( + HInstruction* array = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); - HInstruction* index = new (&allocator_) HParameterValue( + HInstruction* index = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32); HInstruction* c0 = graph_->GetIntConstant(0); @@ -290,40 +291,40 @@ 
TEST_F(LoadStoreAnalysisTest, ArrayIndexCalculationOverflowTest) { HInstruction* c_0x80000001 = graph_->GetIntConstant(0x80000001); // `index+0x80000000` and `index-0x80000000` array indices MAY alias. - HInstruction* add_0x80000000 = new (&allocator_) HAdd( + HInstruction* add_0x80000000 = new (GetAllocator()) HAdd( DataType::Type::kInt32, index, c_0x80000000); - HInstruction* sub_0x80000000 = new (&allocator_) HSub( + HInstruction* sub_0x80000000 = new (GetAllocator()) HSub( DataType::Type::kInt32, index, c_0x80000000); - HInstruction* arr_set_1 = new (&allocator_) HArraySet( + HInstruction* arr_set_1 = new (GetAllocator()) HArraySet( array, add_0x80000000, c0, DataType::Type::kInt32, 0); - HInstruction* arr_set_2 = new (&allocator_) HArraySet( + HInstruction* arr_set_2 = new (GetAllocator()) HArraySet( array, sub_0x80000000, c0, DataType::Type::kInt32, 0); // `index+0x10` and `index-0xFFFFFFF0` array indices MAY alias. - HInstruction* add_0x10 = new (&allocator_) HAdd(DataType::Type::kInt32, index, c_0x10); - HInstruction* sub_0xFFFFFFF0 = new (&allocator_) HSub( + HInstruction* add_0x10 = new (GetAllocator()) HAdd(DataType::Type::kInt32, index, c_0x10); + HInstruction* sub_0xFFFFFFF0 = new (GetAllocator()) HSub( DataType::Type::kInt32, index, c_0xFFFFFFF0); - HInstruction* arr_set_3 = new (&allocator_) HArraySet( + HInstruction* arr_set_3 = new (GetAllocator()) HArraySet( array, add_0x10, c0, DataType::Type::kInt32, 0); - HInstruction* arr_set_4 = new (&allocator_) HArraySet( + HInstruction* arr_set_4 = new (GetAllocator()) HArraySet( array, sub_0xFFFFFFF0, c0, DataType::Type::kInt32, 0); // `index+0x7FFFFFFF` and `index-0x80000001` array indices MAY alias. 
- HInstruction* add_0x7FFFFFFF = new (&allocator_) HAdd( + HInstruction* add_0x7FFFFFFF = new (GetAllocator()) HAdd( DataType::Type::kInt32, index, c_0x7FFFFFFF); - HInstruction* sub_0x80000001 = new (&allocator_) HSub( + HInstruction* sub_0x80000001 = new (GetAllocator()) HSub( DataType::Type::kInt32, index, c_0x80000001); - HInstruction* arr_set_5 = new (&allocator_) HArraySet( + HInstruction* arr_set_5 = new (GetAllocator()) HArraySet( array, add_0x7FFFFFFF, c0, DataType::Type::kInt32, 0); - HInstruction* arr_set_6 = new (&allocator_) HArraySet( + HInstruction* arr_set_6 = new (GetAllocator()) HArraySet( array, sub_0x80000001, c0, DataType::Type::kInt32, 0); // `index+0` and `index-0` array indices MAY alias. - HInstruction* add_0 = new (&allocator_) HAdd(DataType::Type::kInt32, index, c0); - HInstruction* sub_0 = new (&allocator_) HSub(DataType::Type::kInt32, index, c0); - HInstruction* arr_set_7 = new (&allocator_) HArraySet( + HInstruction* add_0 = new (GetAllocator()) HAdd(DataType::Type::kInt32, index, c0); + HInstruction* sub_0 = new (GetAllocator()) HSub(DataType::Type::kInt32, index, c0); + HInstruction* arr_set_7 = new (GetAllocator()) HArraySet( array, add_0, c0, DataType::Type::kInt32, 0); - HInstruction* arr_set_8 = new (&allocator_) HArraySet( + HInstruction* arr_set_8 = new (GetAllocator()) HArraySet( array, sub_0, c0, DataType::Type::kInt32, 0); entry->AddInstruction(array); diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc index 54c2d43e9c..39bfc86432 100644 --- a/compiler/optimizing/load_store_elimination.cc +++ b/compiler/optimizing/load_store_elimination.cc @@ -49,13 +49,13 @@ class LSEVisitor : public HGraphVisitor { ArenaVector<HInstruction*>(heap_locations_collector. 
GetNumberOfHeapLocations(), kUnknownHeapValue, - graph->GetArena()->Adapter(kArenaAllocLSE)), - graph->GetArena()->Adapter(kArenaAllocLSE)), - removed_loads_(graph->GetArena()->Adapter(kArenaAllocLSE)), - substitute_instructions_for_loads_(graph->GetArena()->Adapter(kArenaAllocLSE)), - possibly_removed_stores_(graph->GetArena()->Adapter(kArenaAllocLSE)), - singleton_new_instances_(graph->GetArena()->Adapter(kArenaAllocLSE)), - singleton_new_arrays_(graph->GetArena()->Adapter(kArenaAllocLSE)) { + graph->GetAllocator()->Adapter(kArenaAllocLSE)), + graph->GetAllocator()->Adapter(kArenaAllocLSE)), + removed_loads_(graph->GetAllocator()->Adapter(kArenaAllocLSE)), + substitute_instructions_for_loads_(graph->GetAllocator()->Adapter(kArenaAllocLSE)), + possibly_removed_stores_(graph->GetAllocator()->Adapter(kArenaAllocLSE)), + singleton_new_instances_(graph->GetAllocator()->Adapter(kArenaAllocLSE)), + singleton_new_arrays_(graph->GetAllocator()->Adapter(kArenaAllocLSE)) { } void VisitBasicBlock(HBasicBlock* block) OVERRIDE { diff --git a/compiler/optimizing/locations.cc b/compiler/optimizing/locations.cc index 40fe35b31b..5879c6fa07 100644 --- a/compiler/optimizing/locations.cc +++ b/compiler/optimizing/locations.cc @@ -28,10 +28,10 @@ static_assert(std::is_trivially_copyable<Location>::value, "Location should be t LocationSummary::LocationSummary(HInstruction* instruction, CallKind call_kind, - bool intrinsified) - : inputs_(instruction->InputCount(), - instruction->GetBlock()->GetGraph()->GetArena()->Adapter(kArenaAllocLocationSummary)), - temps_(instruction->GetBlock()->GetGraph()->GetArena()->Adapter(kArenaAllocLocationSummary)), + bool intrinsified, + ArenaAllocator* allocator) + : inputs_(instruction->InputCount(), allocator->Adapter(kArenaAllocLocationSummary)), + temps_(allocator->Adapter(kArenaAllocLocationSummary)), call_kind_(call_kind), intrinsified_(intrinsified), has_custom_slow_path_calling_convention_(false), @@ -43,11 +43,17 @@ 
LocationSummary::LocationSummary(HInstruction* instruction, instruction->SetLocations(this); if (NeedsSafepoint()) { - ArenaAllocator* arena = instruction->GetBlock()->GetGraph()->GetArena(); - stack_mask_ = ArenaBitVector::Create(arena, 0, true, kArenaAllocLocationSummary); + stack_mask_ = ArenaBitVector::Create(allocator, 0, true, kArenaAllocLocationSummary); } } +LocationSummary::LocationSummary(HInstruction* instruction, + CallKind call_kind, + bool intrinsified) + : LocationSummary(instruction, + call_kind, + intrinsified, + instruction->GetBlock()->GetGraph()->GetAllocator()) {} Location Location::RegisterOrConstant(HInstruction* instruction) { return instruction->IsConstant() diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h index 6f0dbce2df..d56c151748 100644 --- a/compiler/optimizing/locations.h +++ b/compiler/optimizing/locations.h @@ -665,6 +665,11 @@ class LocationSummary : public ArenaObject<kArenaAllocLocationSummary> { } private: + LocationSummary(HInstruction* instruction, + CallKind call_kind, + bool intrinsified, + ArenaAllocator* allocator); + ArenaVector<Location> inputs_; ArenaVector<Location> temps_; const CallKind call_kind_; diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc index c51fafa695..d87861bde0 100644 --- a/compiler/optimizing/loop_optimization.cc +++ b/compiler/optimizing/loop_optimization.cc @@ -429,7 +429,7 @@ static HInstruction* Insert(HBasicBlock* block, HInstruction* instruction) { // Check that instructions from the induction sets are fully removed: have no uses // and no other instructions use them. 
-static bool CheckInductionSetFullyRemoved(ArenaSet<HInstruction*>* iset) { +static bool CheckInductionSetFullyRemoved(ScopedArenaSet<HInstruction*>* iset) { for (HInstruction* instr : *iset) { if (instr->GetBlock() != nullptr || !instr->GetUses().empty() || @@ -453,7 +453,7 @@ HLoopOptimization::HLoopOptimization(HGraph* graph, compiler_driver_(compiler_driver), induction_range_(induction_analysis), loop_allocator_(nullptr), - global_allocator_(graph_->GetArena()), + global_allocator_(graph_->GetAllocator()), top_loop_(nullptr), last_loop_(nullptr), iset_(nullptr), @@ -465,7 +465,12 @@ HLoopOptimization::HLoopOptimization(HGraph* graph, vector_runtime_test_a_(nullptr), vector_runtime_test_b_(nullptr), vector_map_(nullptr), - vector_permanent_map_(nullptr) { + vector_permanent_map_(nullptr), + vector_mode_(kSequential), + vector_preheader_(nullptr), + vector_header_(nullptr), + vector_body_(nullptr), + vector_index_(nullptr) { } void HLoopOptimization::Run() { @@ -475,10 +480,8 @@ void HLoopOptimization::Run() { return; } - // Phase-local allocator that draws from the global pool. Since the allocator - // itself resides on the stack, it is destructed on exiting Run(), which - // implies its underlying memory is released immediately. - ArenaAllocator allocator(global_allocator_->GetArenaPool()); + // Phase-local allocator. + ScopedArenaAllocator allocator(graph_->GetArenaStack()); loop_allocator_ = &allocator; // Perform loop optimizations. @@ -499,8 +502,8 @@ void HLoopOptimization::Run() { void HLoopOptimization::LocalRun() { // Build the linear order using the phase-local allocator. This step enables building // a loop hierarchy that properly reflects the outer-inner and previous-next relation. 
- ArenaVector<HBasicBlock*> linear_order(loop_allocator_->Adapter(kArenaAllocLinearOrder)); - LinearizeGraph(graph_, loop_allocator_, &linear_order); + ScopedArenaVector<HBasicBlock*> linear_order(loop_allocator_->Adapter(kArenaAllocLinearOrder)); + LinearizeGraph(graph_, &linear_order); // Build the loop hierarchy. for (HBasicBlock* block : linear_order) { @@ -513,13 +516,13 @@ void HLoopOptimization::LocalRun() { // temporary data structures using the phase-local allocator. All new HIR // should use the global allocator. if (top_loop_ != nullptr) { - ArenaSet<HInstruction*> iset(loop_allocator_->Adapter(kArenaAllocLoopOptimization)); - ArenaSafeMap<HInstruction*, HInstruction*> reds( + ScopedArenaSet<HInstruction*> iset(loop_allocator_->Adapter(kArenaAllocLoopOptimization)); + ScopedArenaSafeMap<HInstruction*, HInstruction*> reds( std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization)); - ArenaSet<ArrayReference> refs(loop_allocator_->Adapter(kArenaAllocLoopOptimization)); - ArenaSafeMap<HInstruction*, HInstruction*> map( + ScopedArenaSet<ArrayReference> refs(loop_allocator_->Adapter(kArenaAllocLoopOptimization)); + ScopedArenaSafeMap<HInstruction*, HInstruction*> map( std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization)); - ArenaSafeMap<HInstruction*, HInstruction*> perm( + ScopedArenaSafeMap<HInstruction*, HInstruction*> perm( std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization)); // Attach. 
iset_ = &iset; diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h index 6e6e3873f9..b1b3d110bc 100644 --- a/compiler/optimizing/loop_optimization.h +++ b/compiler/optimizing/loop_optimization.h @@ -17,6 +17,8 @@ #ifndef ART_COMPILER_OPTIMIZING_LOOP_OPTIMIZATION_H_ #define ART_COMPILER_OPTIMIZING_LOOP_OPTIMIZATION_H_ +#include "base/scoped_arena_allocator.h" +#include "base/scoped_arena_containers.h" #include "induction_var_range.h" #include "nodes.h" #include "optimization.h" @@ -220,7 +222,7 @@ class HLoopOptimization : public HOptimization { // Phase-local heap memory allocator for the loop optimizer. Storage obtained // through this allocator is immediately released when the loop optimizer is done. - ArenaAllocator* loop_allocator_; + ScopedArenaAllocator* loop_allocator_; // Global heap memory allocator. Used to build HIR. ArenaAllocator* global_allocator_; @@ -232,14 +234,14 @@ class HLoopOptimization : public HOptimization { // Temporary bookkeeping of a set of instructions. // Contents reside in phase-local heap memory. - ArenaSet<HInstruction*>* iset_; + ScopedArenaSet<HInstruction*>* iset_; // Temporary bookkeeping of reduction instructions. Mapping is two-fold: // (1) reductions in the loop-body are mapped back to their phi definition, // (2) phi definitions are mapped to their initial value (updated during // code generation to feed the proper values into the new chain). // Contents reside in phase-local heap memory. - ArenaSafeMap<HInstruction*, HInstruction*>* reductions_; + ScopedArenaSafeMap<HInstruction*, HInstruction*>* reductions_; // Flag that tracks if any simplifications have occurred. bool simplified_; @@ -249,7 +251,7 @@ class HLoopOptimization : public HOptimization { // Set of array references in the vector loop. // Contents reside in phase-local heap memory. - ArenaSet<ArrayReference>* vector_refs_; + ScopedArenaSet<ArrayReference>* vector_refs_; // Dynamic loop peeling candidate for alignment. 
const ArrayReference* vector_peeling_candidate_; @@ -262,11 +264,11 @@ class HLoopOptimization : public HOptimization { // loop (mode is kSequential) and the actual vector loop (mode is kVector). The data // structure maps original instructions into the new instructions. // Contents reside in phase-local heap memory. - ArenaSafeMap<HInstruction*, HInstruction*>* vector_map_; + ScopedArenaSafeMap<HInstruction*, HInstruction*>* vector_map_; // Permanent mapping used during vectorization synthesis. // Contents reside in phase-local heap memory. - ArenaSafeMap<HInstruction*, HInstruction*>* vector_permanent_map_; + ScopedArenaSafeMap<HInstruction*, HInstruction*>* vector_permanent_map_; // Temporary vectorization bookkeeping. VectorMode vector_mode_; // synthesis mode diff --git a/compiler/optimizing/loop_optimization_test.cc b/compiler/optimizing/loop_optimization_test.cc index 95718ae388..4e1857df5b 100644 --- a/compiler/optimizing/loop_optimization_test.cc +++ b/compiler/optimizing/loop_optimization_test.cc @@ -24,14 +24,12 @@ namespace art { * constructing the loop hierarchy. Actual optimizations are tested * through the checker tests. */ -class LoopOptimizationTest : public CommonCompilerTest { +class LoopOptimizationTest : public OptimizingUnitTest { public: LoopOptimizationTest() - : pool_(), - allocator_(&pool_), - graph_(CreateGraph(&allocator_)), - iva_(new (&allocator_) HInductionVarAnalysis(graph_)), - loop_opt_(new (&allocator_) HLoopOptimization(graph_, nullptr, iva_, nullptr)) { + : graph_(CreateGraph()), + iva_(new (GetAllocator()) HInductionVarAnalysis(graph_)), + loop_opt_(new (GetAllocator()) HLoopOptimization(graph_, nullptr, iva_, nullptr)) { BuildGraph(); } @@ -40,38 +38,38 @@ class LoopOptimizationTest : public CommonCompilerTest { /** Constructs bare minimum graph. 
*/ void BuildGraph() { graph_->SetNumberOfVRegs(1); - entry_block_ = new (&allocator_) HBasicBlock(graph_); - return_block_ = new (&allocator_) HBasicBlock(graph_); - exit_block_ = new (&allocator_) HBasicBlock(graph_); + entry_block_ = new (GetAllocator()) HBasicBlock(graph_); + return_block_ = new (GetAllocator()) HBasicBlock(graph_); + exit_block_ = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(entry_block_); graph_->AddBlock(return_block_); graph_->AddBlock(exit_block_); graph_->SetEntryBlock(entry_block_); graph_->SetExitBlock(exit_block_); - parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(), - dex::TypeIndex(0), - 0, - DataType::Type::kInt32); + parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(), + dex::TypeIndex(0), + 0, + DataType::Type::kInt32); entry_block_->AddInstruction(parameter_); - return_block_->AddInstruction(new (&allocator_) HReturnVoid()); - exit_block_->AddInstruction(new (&allocator_) HExit()); + return_block_->AddInstruction(new (GetAllocator()) HReturnVoid()); + exit_block_->AddInstruction(new (GetAllocator()) HExit()); entry_block_->AddSuccessor(return_block_); return_block_->AddSuccessor(exit_block_); } /** Adds a loop nest at given position before successor. */ HBasicBlock* AddLoop(HBasicBlock* position, HBasicBlock* successor) { - HBasicBlock* header = new (&allocator_) HBasicBlock(graph_); - HBasicBlock* body = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* header = new (GetAllocator()) HBasicBlock(graph_); + HBasicBlock* body = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(header); graph_->AddBlock(body); // Control flow. 
position->ReplaceSuccessor(successor, header); header->AddSuccessor(body); header->AddSuccessor(successor); - header->AddInstruction(new (&allocator_) HIf(parameter_)); + header->AddInstruction(new (GetAllocator()) HIf(parameter_)); body->AddSuccessor(header); - body->AddInstruction(new (&allocator_) HGoto()); + body->AddInstruction(new (GetAllocator()) HGoto()); return header; } @@ -80,7 +78,8 @@ class LoopOptimizationTest : public CommonCompilerTest { graph_->BuildDominatorTree(); iva_->Run(); // Do not release the loop hierarchy. - loop_opt_->loop_allocator_ = &allocator_; + ScopedArenaAllocator loop_allocator(GetArenaStack()); + loop_opt_->loop_allocator_ = &loop_allocator; loop_opt_->LocalRun(); } @@ -101,8 +100,6 @@ class LoopOptimizationTest : public CommonCompilerTest { } // General building fields. - ArenaPool pool_; - ArenaAllocator allocator_; HGraph* graph_; HInductionVarAnalysis* iva_; HLoopOptimization* loop_opt_; @@ -199,8 +196,8 @@ TEST_F(LoopOptimizationTest, LoopNestWithSequence) { // predecessors. TEST_F(LoopOptimizationTest, SimplifyLoop) { // Can't use AddLoop as we want special order for blocks predecessors. - HBasicBlock* header = new (&allocator_) HBasicBlock(graph_); - HBasicBlock* body = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* header = new (GetAllocator()) HBasicBlock(graph_); + HBasicBlock* body = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(header); graph_->AddBlock(body); @@ -213,11 +210,11 @@ TEST_F(LoopOptimizationTest, SimplifyLoop) { DCHECK(header->GetSuccessors()[1] == return_block_); // Data flow. 
- header->AddInstruction(new (&allocator_) HIf(parameter_)); - body->AddInstruction(new (&allocator_) HGoto()); + header->AddInstruction(new (GetAllocator()) HIf(parameter_)); + body->AddInstruction(new (GetAllocator()) HGoto()); - HPhi* phi = new (&allocator_) HPhi(&allocator_, 0, 0, DataType::Type::kInt32); - HInstruction* add = new (&allocator_) HAdd(DataType::Type::kInt32, phi, parameter_); + HPhi* phi = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32); + HInstruction* add = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi, parameter_); header->AddPhi(phi); body->AddInstruction(add); diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc index cae5054ef7..1a537ca47e 100644 --- a/compiler/optimizing/nodes.cc +++ b/compiler/optimizing/nodes.cc @@ -56,13 +56,13 @@ void HGraph::FindBackEdges(ArenaBitVector* visited) { DCHECK_EQ(visited->GetHighestBitSet(), -1); // Nodes that we're currently visiting, indexed by block id. - ArenaBitVector visiting(arena_, blocks_.size(), false, kArenaAllocGraphBuilder); + ArenaBitVector visiting(allocator_, blocks_.size(), false, kArenaAllocGraphBuilder); // Number of successors visited from a given node, indexed by block id. ArenaVector<size_t> successors_visited(blocks_.size(), 0u, - arena_->Adapter(kArenaAllocGraphBuilder)); + allocator_->Adapter(kArenaAllocGraphBuilder)); // Stack of nodes that we're currently visiting (same as marked in "visiting" above). 
- ArenaVector<HBasicBlock*> worklist(arena_->Adapter(kArenaAllocGraphBuilder)); + ArenaVector<HBasicBlock*> worklist(allocator_->Adapter(kArenaAllocGraphBuilder)); constexpr size_t kDefaultWorklistSize = 8; worklist.reserve(kDefaultWorklistSize); visited->SetBit(entry_block_->GetBlockId()); @@ -173,7 +173,7 @@ void HGraph::RemoveDeadBlocks(const ArenaBitVector& visited) { } GraphAnalysisResult HGraph::BuildDominatorTree() { - ArenaBitVector visited(arena_, blocks_.size(), false, kArenaAllocGraphBuilder); + ArenaBitVector visited(allocator_, blocks_.size(), false, kArenaAllocGraphBuilder); // (1) Find the back edges in the graph doing a DFS traversal. FindBackEdges(&visited); @@ -259,13 +259,13 @@ void HGraph::ComputeDominanceInformation() { reverse_post_order_.push_back(entry_block_); // Number of visits of a given node, indexed by block id. - ArenaVector<size_t> visits(blocks_.size(), 0u, arena_->Adapter(kArenaAllocGraphBuilder)); + ArenaVector<size_t> visits(blocks_.size(), 0u, allocator_->Adapter(kArenaAllocGraphBuilder)); // Number of successors visited from a given node, indexed by block id. ArenaVector<size_t> successors_visited(blocks_.size(), 0u, - arena_->Adapter(kArenaAllocGraphBuilder)); + allocator_->Adapter(kArenaAllocGraphBuilder)); // Nodes for which we need to visit successors. 
- ArenaVector<HBasicBlock*> worklist(arena_->Adapter(kArenaAllocGraphBuilder)); + ArenaVector<HBasicBlock*> worklist(allocator_->Adapter(kArenaAllocGraphBuilder)); constexpr size_t kDefaultWorklistSize = 8; worklist.reserve(kDefaultWorklistSize); worklist.push_back(entry_block_); @@ -335,7 +335,7 @@ void HGraph::ComputeDominanceInformation() { } HBasicBlock* HGraph::SplitEdge(HBasicBlock* block, HBasicBlock* successor) { - HBasicBlock* new_block = new (arena_) HBasicBlock(this, successor->GetDexPc()); + HBasicBlock* new_block = new (allocator_) HBasicBlock(this, successor->GetDexPc()); AddBlock(new_block); // Use `InsertBetween` to ensure the predecessor index and successor index of // `block` and `successor` are preserved. @@ -347,7 +347,7 @@ void HGraph::SplitCriticalEdge(HBasicBlock* block, HBasicBlock* successor) { // Insert a new node between `block` and `successor` to split the // critical edge. HBasicBlock* new_block = SplitEdge(block, successor); - new_block->AddInstruction(new (arena_) HGoto(successor->GetDexPc())); + new_block->AddInstruction(new (allocator_) HGoto(successor->GetDexPc())); if (successor->IsLoopHeader()) { // If we split at a back edge boundary, make the new block the back edge. HLoopInformation* info = successor->GetLoopInformation(); @@ -396,9 +396,9 @@ void HGraph::SimplifyLoop(HBasicBlock* header) { // this graph. 
size_t number_of_incomings = header->GetPredecessors().size() - info->NumberOfBackEdges(); if (number_of_incomings != 1 || (GetEntryBlock()->GetSingleSuccessor() == header)) { - HBasicBlock* pre_header = new (arena_) HBasicBlock(this, header->GetDexPc()); + HBasicBlock* pre_header = new (allocator_) HBasicBlock(this, header->GetDexPc()); AddBlock(pre_header); - pre_header->AddInstruction(new (arena_) HGoto(header->GetDexPc())); + pre_header->AddInstruction(new (allocator_) HGoto(header->GetDexPc())); for (size_t pred = 0; pred < header->GetPredecessors().size(); ++pred) { HBasicBlock* predecessor = header->GetPredecessors()[pred]; @@ -440,7 +440,7 @@ void HGraph::ComputeTryBlockInformation() { try_entry != &block->GetTryCatchInformation()->GetTryEntry())) { // We are either setting try block membership for the first time or it // has changed. - block->SetTryCatchInformation(new (arena_) TryCatchInformation(*try_entry)); + block->SetTryCatchInformation(new (allocator_) TryCatchInformation(*try_entry)); } } } @@ -547,7 +547,7 @@ HNullConstant* HGraph::GetNullConstant(uint32_t dex_pc) { // not null and not in a block. Otherwise, we need to clear the instruction // id and/or any invariants the graph is assuming when adding new instructions. if ((cached_null_constant_ == nullptr) || (cached_null_constant_->GetBlock() == nullptr)) { - cached_null_constant_ = new (arena_) HNullConstant(dex_pc); + cached_null_constant_ = new (allocator_) HNullConstant(dex_pc); cached_null_constant_->SetReferenceTypeInfo(inexact_object_rti_); InsertConstant(cached_null_constant_); } @@ -563,7 +563,7 @@ HCurrentMethod* HGraph::GetCurrentMethod() { // not null and not in a block. Otherwise, we need to clear the instruction // id and/or any invariants the graph is assuming when adding new instructions. 
if ((cached_current_method_ == nullptr) || (cached_current_method_->GetBlock() == nullptr)) { - cached_current_method_ = new (arena_) HCurrentMethod( + cached_current_method_ = new (allocator_) HCurrentMethod( Is64BitInstructionSet(instruction_set_) ? DataType::Type::kInt64 : DataType::Type::kInt32, entry_block_->GetDexPc()); if (entry_block_->GetFirstInstruction() == nullptr) { @@ -710,7 +710,7 @@ void HLoopInformation::Populate() { bool is_irreducible_loop = HasBackEdgeNotDominatedByHeader(); if (is_irreducible_loop) { - ArenaBitVector visited(graph->GetArena(), + ArenaBitVector visited(graph->GetAllocator(), graph->GetBlocks().size(), /* expandable */ false, kArenaAllocGraphBuilder); @@ -1655,8 +1655,8 @@ HBasicBlock* HBasicBlock::SplitBefore(HInstruction* cursor) { DCHECK(!graph_->IsInSsaForm()) << "Support for SSA form not implemented."; DCHECK_EQ(cursor->GetBlock(), this); - HBasicBlock* new_block = new (GetGraph()->GetArena()) HBasicBlock(GetGraph(), - cursor->GetDexPc()); + HBasicBlock* new_block = + new (GetGraph()->GetAllocator()) HBasicBlock(GetGraph(), cursor->GetDexPc()); new_block->instructions_.first_instruction_ = cursor; new_block->instructions_.last_instruction_ = instructions_.last_instruction_; instructions_.last_instruction_ = cursor->previous_; @@ -1668,7 +1668,7 @@ HBasicBlock* HBasicBlock::SplitBefore(HInstruction* cursor) { } new_block->instructions_.SetBlockOfInstructions(new_block); - AddInstruction(new (GetGraph()->GetArena()) HGoto(new_block->GetDexPc())); + AddInstruction(new (GetGraph()->GetAllocator()) HGoto(new_block->GetDexPc())); for (HBasicBlock* successor : GetSuccessors()) { successor->predecessors_[successor->GetPredecessorIndexOf(this)] = new_block; @@ -1685,7 +1685,7 @@ HBasicBlock* HBasicBlock::CreateImmediateDominator() { DCHECK(!graph_->IsInSsaForm()) << "Support for SSA form not implemented."; DCHECK(!IsCatchBlock()) << "Support for updating try/catch information not implemented."; - HBasicBlock* new_block = new 
(GetGraph()->GetArena()) HBasicBlock(GetGraph(), GetDexPc()); + HBasicBlock* new_block = new (GetGraph()->GetAllocator()) HBasicBlock(GetGraph(), GetDexPc()); for (HBasicBlock* predecessor : GetPredecessors()) { predecessor->successors_[predecessor->GetSuccessorIndexOf(this)] = new_block; @@ -1701,8 +1701,8 @@ HBasicBlock* HBasicBlock::CreateImmediateDominator() { HBasicBlock* HBasicBlock::SplitBeforeForInlining(HInstruction* cursor) { DCHECK_EQ(cursor->GetBlock(), this); - HBasicBlock* new_block = new (GetGraph()->GetArena()) HBasicBlock(GetGraph(), - cursor->GetDexPc()); + HBasicBlock* new_block = + new (GetGraph()->GetAllocator()) HBasicBlock(GetGraph(), cursor->GetDexPc()); new_block->instructions_.first_instruction_ = cursor; new_block->instructions_.last_instruction_ = instructions_.last_instruction_; instructions_.last_instruction_ = cursor->previous_; @@ -1734,7 +1734,7 @@ HBasicBlock* HBasicBlock::SplitAfterForInlining(HInstruction* cursor) { DCHECK_NE(instructions_.last_instruction_, cursor); DCHECK_EQ(cursor->GetBlock(), this); - HBasicBlock* new_block = new (GetGraph()->GetArena()) HBasicBlock(GetGraph(), GetDexPc()); + HBasicBlock* new_block = new (GetGraph()->GetAllocator()) HBasicBlock(GetGraph(), GetDexPc()); new_block->instructions_.first_instruction_ = cursor->GetNext(); new_block->instructions_.last_instruction_ = instructions_.last_instruction_; cursor->next_->previous_ = nullptr; @@ -2030,7 +2030,7 @@ void HBasicBlock::DisconnectAndDelete() { last_instruction->IsPackedSwitch() || (last_instruction->IsTryBoundary() && IsCatchBlock())); predecessor->RemoveInstruction(last_instruction); - predecessor->AddInstruction(new (graph_->GetArena()) HGoto(last_instruction->GetDexPc())); + predecessor->AddInstruction(new (graph_->GetAllocator()) HGoto(last_instruction->GetDexPc())); } else if (num_pred_successors == 0u) { // The predecessor has no remaining successors and therefore must be dead. 
// We deliberately leave it without a control-flow instruction so that the @@ -2241,7 +2241,7 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) { if (current->NeedsEnvironment()) { DCHECK(current->HasEnvironment()); current->GetEnvironment()->SetAndCopyParentChain( - outer_graph->GetArena(), invoke->GetEnvironment()); + outer_graph->GetAllocator(), invoke->GetEnvironment()); } } } @@ -2294,7 +2294,7 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) { // into two blocks, merge the first block of the inlined graph into // the first half, and replace the exit block of the inlined graph // with the second half. - ArenaAllocator* allocator = outer_graph->GetArena(); + ArenaAllocator* allocator = outer_graph->GetAllocator(); HBasicBlock* at = invoke->GetBlock(); // Note that we split before the invoke only to simplify polymorphic inlining. HBasicBlock* to = at->SplitBeforeForInlining(invoke); @@ -2478,10 +2478,10 @@ void HGraph::TransformLoopHeaderForBCE(HBasicBlock* header) { HBasicBlock* old_pre_header = header->GetDominator(); // Need extra block to avoid critical edge. 
- HBasicBlock* if_block = new (arena_) HBasicBlock(this, header->GetDexPc()); - HBasicBlock* true_block = new (arena_) HBasicBlock(this, header->GetDexPc()); - HBasicBlock* false_block = new (arena_) HBasicBlock(this, header->GetDexPc()); - HBasicBlock* new_pre_header = new (arena_) HBasicBlock(this, header->GetDexPc()); + HBasicBlock* if_block = new (allocator_) HBasicBlock(this, header->GetDexPc()); + HBasicBlock* true_block = new (allocator_) HBasicBlock(this, header->GetDexPc()); + HBasicBlock* false_block = new (allocator_) HBasicBlock(this, header->GetDexPc()); + HBasicBlock* new_pre_header = new (allocator_) HBasicBlock(this, header->GetDexPc()); AddBlock(if_block); AddBlock(true_block); AddBlock(false_block); @@ -2536,9 +2536,9 @@ HBasicBlock* HGraph::TransformLoopForVectorization(HBasicBlock* header, HLoopInformation* loop = header->GetLoopInformation(); // Add new loop blocks. - HBasicBlock* new_pre_header = new (arena_) HBasicBlock(this, header->GetDexPc()); - HBasicBlock* new_header = new (arena_) HBasicBlock(this, header->GetDexPc()); - HBasicBlock* new_body = new (arena_) HBasicBlock(this, header->GetDexPc()); + HBasicBlock* new_pre_header = new (allocator_) HBasicBlock(this, header->GetDexPc()); + HBasicBlock* new_header = new (allocator_) HBasicBlock(this, header->GetDexPc()); + HBasicBlock* new_body = new (allocator_) HBasicBlock(this, header->GetDexPc()); AddBlock(new_pre_header); AddBlock(new_header); AddBlock(new_body); @@ -2570,10 +2570,10 @@ HBasicBlock* HGraph::TransformLoopForVectorization(HBasicBlock* header, reverse_post_order_[index_of_body] = new_body; // Add gotos and suspend check (client must add conditional in header). 
- new_pre_header->AddInstruction(new (arena_) HGoto()); - HSuspendCheck* suspend_check = new (arena_) HSuspendCheck(header->GetDexPc()); + new_pre_header->AddInstruction(new (allocator_) HGoto()); + HSuspendCheck* suspend_check = new (allocator_) HSuspendCheck(header->GetDexPc()); new_header->AddInstruction(suspend_check); - new_body->AddInstruction(new (arena_) HGoto()); + new_body->AddInstruction(new (allocator_) HGoto()); suspend_check->CopyEnvironmentFromWithLoopPhiAdjustment( loop->GetSuspendCheck()->GetEnvironment(), header); @@ -2891,7 +2891,7 @@ void HInstruction::RemoveEnvironmentUsers() { // Returns an instruction with the opposite Boolean value from 'cond'. HInstruction* HGraph::InsertOppositeCondition(HInstruction* cond, HInstruction* cursor) { - ArenaAllocator* allocator = GetArena(); + ArenaAllocator* allocator = GetAllocator(); if (cond->IsCondition() && !DataType::IsFloatingPointType(cond->InputAt(0)->GetType())) { diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index fef0c865ae..99fde755da 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -45,6 +45,7 @@ namespace art { +class ArenaStack; class GraphChecker; class HBasicBlock; class HConstructorFence; @@ -305,7 +306,8 @@ std::ostream& operator<<(std::ostream& os, const ReferenceTypeInfo& rhs); // Control-flow graph of a method. Contains a list of basic blocks. 
class HGraph : public ArenaObject<kArenaAllocGraph> { public: - HGraph(ArenaAllocator* arena, + HGraph(ArenaAllocator* allocator, + ArenaStack* arena_stack, const DexFile& dex_file, uint32_t method_idx, InstructionSet instruction_set, @@ -313,10 +315,11 @@ class HGraph : public ArenaObject<kArenaAllocGraph> { bool debuggable = false, bool osr = false, int start_instruction_id = 0) - : arena_(arena), - blocks_(arena->Adapter(kArenaAllocBlockList)), - reverse_post_order_(arena->Adapter(kArenaAllocReversePostOrder)), - linear_order_(arena->Adapter(kArenaAllocLinearOrder)), + : allocator_(allocator), + arena_stack_(arena_stack), + blocks_(allocator->Adapter(kArenaAllocBlockList)), + reverse_post_order_(allocator->Adapter(kArenaAllocReversePostOrder)), + linear_order_(allocator->Adapter(kArenaAllocLinearOrder)), entry_block_(nullptr), exit_block_(nullptr), maximum_number_of_out_vregs_(0), @@ -337,22 +340,23 @@ class HGraph : public ArenaObject<kArenaAllocGraph> { number_of_cha_guards_(0), instruction_set_(instruction_set), cached_null_constant_(nullptr), - cached_int_constants_(std::less<int32_t>(), arena->Adapter(kArenaAllocConstantsMap)), - cached_float_constants_(std::less<int32_t>(), arena->Adapter(kArenaAllocConstantsMap)), - cached_long_constants_(std::less<int64_t>(), arena->Adapter(kArenaAllocConstantsMap)), - cached_double_constants_(std::less<int64_t>(), arena->Adapter(kArenaAllocConstantsMap)), + cached_int_constants_(std::less<int32_t>(), allocator->Adapter(kArenaAllocConstantsMap)), + cached_float_constants_(std::less<int32_t>(), allocator->Adapter(kArenaAllocConstantsMap)), + cached_long_constants_(std::less<int64_t>(), allocator->Adapter(kArenaAllocConstantsMap)), + cached_double_constants_(std::less<int64_t>(), allocator->Adapter(kArenaAllocConstantsMap)), cached_current_method_(nullptr), art_method_(nullptr), inexact_object_rti_(ReferenceTypeInfo::CreateInvalid()), osr_(osr), - cha_single_implementation_list_(arena->Adapter(kArenaAllocCHA)) { + 
cha_single_implementation_list_(allocator->Adapter(kArenaAllocCHA)) { blocks_.reserve(kDefaultNumberOfBlocks); } // Acquires and stores RTI of inexact Object to be used when creating HNullConstant. void InitializeInexactObjectRTI(VariableSizedHandleScope* handles); - ArenaAllocator* GetArena() const { return arena_; } + ArenaAllocator* GetAllocator() const { return allocator_; } + ArenaStack* GetArenaStack() const { return arena_stack_; } const ArenaVector<HBasicBlock*>& GetBlocks() const { return blocks_; } bool IsInSsaForm() const { return in_ssa_form_; } @@ -613,7 +617,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> { // If not found or previously deleted, create and cache a new instruction. // Don't bother reviving a previously deleted instruction, for simplicity. if (constant == nullptr || constant->GetBlock() == nullptr) { - constant = new (arena_) InstructionType(value, dex_pc); + constant = new (allocator_) InstructionType(value, dex_pc); cache->Overwrite(value, constant); InsertConstant(constant); } @@ -629,7 +633,8 @@ class HGraph : public ArenaObject<kArenaAllocGraph> { // See CacheFloatConstant comment. void CacheDoubleConstant(HDoubleConstant* constant); - ArenaAllocator* const arena_; + ArenaAllocator* const allocator_; + ArenaStack* const arena_stack_; // List of blocks in insertion order. ArenaVector<HBasicBlock*> blocks_; @@ -751,9 +756,12 @@ class HLoopInformation : public ArenaObject<kArenaAllocLoopInfo> { suspend_check_(nullptr), irreducible_(false), contains_irreducible_loop_(false), - back_edges_(graph->GetArena()->Adapter(kArenaAllocLoopInfoBackEdges)), + back_edges_(graph->GetAllocator()->Adapter(kArenaAllocLoopInfoBackEdges)), // Make bit vector growable, as the number of blocks may change. 
- blocks_(graph->GetArena(), graph->GetBlocks().size(), true, kArenaAllocLoopInfoBackEdges) { + blocks_(graph->GetAllocator(), + graph->GetBlocks().size(), + true, + kArenaAllocLoopInfoBackEdges) { back_edges_.reserve(kDefaultNumberOfBackEdges); } @@ -916,11 +924,11 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> { public: explicit HBasicBlock(HGraph* graph, uint32_t dex_pc = kNoDexPc) : graph_(graph), - predecessors_(graph->GetArena()->Adapter(kArenaAllocPredecessors)), - successors_(graph->GetArena()->Adapter(kArenaAllocSuccessors)), + predecessors_(graph->GetAllocator()->Adapter(kArenaAllocPredecessors)), + successors_(graph->GetAllocator()->Adapter(kArenaAllocSuccessors)), loop_information_(nullptr), dominator_(nullptr), - dominated_blocks_(graph->GetArena()->Adapter(kArenaAllocDominated)), + dominated_blocks_(graph->GetAllocator()->Adapter(kArenaAllocDominated)), block_id_(kInvalidBlockId), dex_pc_(dex_pc), lifetime_start_(kNoLifetime), @@ -972,7 +980,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> { void AddBackEdge(HBasicBlock* back_edge) { if (loop_information_ == nullptr) { - loop_information_ = new (graph_->GetArena()) HLoopInformation(this, graph_); + loop_information_ = new (graph_->GetAllocator()) HLoopInformation(this, graph_); } DCHECK_EQ(loop_information_->GetHeader(), this); loop_information_->AddBackEdge(back_edge); @@ -1792,21 +1800,23 @@ class SideEffects : public ValueObject { // A HEnvironment object contains the values of virtual registers at a given location. 
class HEnvironment : public ArenaObject<kArenaAllocEnvironment> { public: - ALWAYS_INLINE HEnvironment(ArenaAllocator* arena, + ALWAYS_INLINE HEnvironment(ArenaAllocator* allocator, size_t number_of_vregs, ArtMethod* method, uint32_t dex_pc, HInstruction* holder) - : vregs_(number_of_vregs, arena->Adapter(kArenaAllocEnvironmentVRegs)), - locations_(arena->Adapter(kArenaAllocEnvironmentLocations)), + : vregs_(number_of_vregs, allocator->Adapter(kArenaAllocEnvironmentVRegs)), + locations_(allocator->Adapter(kArenaAllocEnvironmentLocations)), parent_(nullptr), method_(method), dex_pc_(dex_pc), holder_(holder) { } - ALWAYS_INLINE HEnvironment(ArenaAllocator* arena, const HEnvironment& to_copy, HInstruction* holder) - : HEnvironment(arena, + ALWAYS_INLINE HEnvironment(ArenaAllocator* allocator, + const HEnvironment& to_copy, + HInstruction* holder) + : HEnvironment(allocator, to_copy.Size(), to_copy.GetMethod(), to_copy.GetDexPc(), @@ -1925,7 +1935,7 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> { HInstruction* GetPreviousDisregardingMoves() const; HBasicBlock* GetBlock() const { return block_; } - ArenaAllocator* GetArena() const { return block_->GetGraph()->GetArena(); } + ArenaAllocator* GetAllocator() const { return block_->GetGraph()->GetAllocator(); } void SetBlock(HBasicBlock* block) { block_ = block; } bool IsInBlock() const { return block_ != nullptr; } bool IsInLoop() const { return block_->IsInLoop(); } @@ -2015,7 +2025,7 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> { // Note: fixup_end remains valid across push_front(). auto fixup_end = uses_.empty() ? 
uses_.begin() : ++uses_.begin(); HUseListNode<HInstruction*>* new_node = - new (GetBlock()->GetGraph()->GetArena()) HUseListNode<HInstruction*>(user, index); + new (GetBlock()->GetGraph()->GetAllocator()) HUseListNode<HInstruction*>(user, index); uses_.push_front(*new_node); FixUpUserRecordsAfterUseInsertion(fixup_end); } @@ -2025,7 +2035,7 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> { // Note: env_fixup_end remains valid across push_front(). auto env_fixup_end = env_uses_.empty() ? env_uses_.begin() : ++env_uses_.begin(); HUseListNode<HEnvironment*>* new_node = - new (GetBlock()->GetGraph()->GetArena()) HUseListNode<HEnvironment*>(user, index); + new (GetBlock()->GetGraph()->GetAllocator()) HUseListNode<HEnvironment*>(user, index); env_uses_.push_front(*new_node); FixUpUserRecordsAfterEnvUseInsertion(env_fixup_end); } @@ -2108,7 +2118,7 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> { // copying, the uses lists are being updated. void CopyEnvironmentFrom(HEnvironment* environment) { DCHECK(environment_ == nullptr); - ArenaAllocator* allocator = GetBlock()->GetGraph()->GetArena(); + ArenaAllocator* allocator = GetBlock()->GetGraph()->GetAllocator(); environment_ = new (allocator) HEnvironment(allocator, *environment, this); environment_->CopyFrom(environment); if (environment->GetParent() != nullptr) { @@ -2119,7 +2129,7 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> { void CopyEnvironmentFromWithLoopPhiAdjustment(HEnvironment* environment, HBasicBlock* block) { DCHECK(environment_ == nullptr); - ArenaAllocator* allocator = GetBlock()->GetGraph()->GetArena(); + ArenaAllocator* allocator = GetBlock()->GetGraph()->GetAllocator(); environment_ = new (allocator) HEnvironment(allocator, *environment, this); environment_->CopyFromWithLoopPhiAdjustment(environment, block); if (environment->GetParent() != nullptr) { @@ -2467,11 +2477,11 @@ class HVariableInputSizeInstruction : public HInstruction { protected: 
HVariableInputSizeInstruction(SideEffects side_effects, uint32_t dex_pc, - ArenaAllocator* arena, + ArenaAllocator* allocator, size_t number_of_inputs, ArenaAllocKind kind) : HInstruction(side_effects, dex_pc), - inputs_(number_of_inputs, arena->Adapter(kind)) {} + inputs_(number_of_inputs, allocator->Adapter(kind)) {} ArenaVector<HUserRecord<HInstruction*>> inputs_; @@ -2572,7 +2582,7 @@ class HReturn FINAL : public HTemplateInstruction<1> { class HPhi FINAL : public HVariableInputSizeInstruction { public: - HPhi(ArenaAllocator* arena, + HPhi(ArenaAllocator* allocator, uint32_t reg_number, size_t number_of_inputs, DataType::Type type, @@ -2580,7 +2590,7 @@ class HPhi FINAL : public HVariableInputSizeInstruction { : HVariableInputSizeInstruction( SideEffects::None(), dex_pc, - arena, + allocator, number_of_inputs, kArenaAllocPhiInputs), reg_number_(reg_number) { @@ -3019,11 +3029,14 @@ class HDeoptimize FINAL : public HVariableInputSizeInstruction { public: // Use this constructor when the `HDeoptimize` acts as a barrier, where no code can move // across. - HDeoptimize(ArenaAllocator* arena, HInstruction* cond, DeoptimizationKind kind, uint32_t dex_pc) + HDeoptimize(ArenaAllocator* allocator, + HInstruction* cond, + DeoptimizationKind kind, + uint32_t dex_pc) : HVariableInputSizeInstruction( SideEffects::All(), dex_pc, - arena, + allocator, /* number_of_inputs */ 1, kArenaAllocMisc) { SetPackedFlag<kFieldCanBeMoved>(false); @@ -3036,7 +3049,7 @@ class HDeoptimize FINAL : public HVariableInputSizeInstruction { // instead of `guard`. // We set CanTriggerGC to prevent any intermediate address to be live // at the point of the `HDeoptimize`. 
- HDeoptimize(ArenaAllocator* arena, + HDeoptimize(ArenaAllocator* allocator, HInstruction* cond, HInstruction* guard, DeoptimizationKind kind, @@ -3044,7 +3057,7 @@ class HDeoptimize FINAL : public HVariableInputSizeInstruction { : HVariableInputSizeInstruction( SideEffects::CanTriggerGC(), dex_pc, - arena, + allocator, /* number_of_inputs */ 2, kArenaAllocMisc) { SetPackedFlag<kFieldCanBeMoved>(true); @@ -3108,8 +3121,8 @@ class HShouldDeoptimizeFlag FINAL : public HVariableInputSizeInstruction { public: // CHA guards are only optimized in a separate pass and it has no side effects // with regard to other passes. - HShouldDeoptimizeFlag(ArenaAllocator* arena, uint32_t dex_pc) - : HVariableInputSizeInstruction(SideEffects::None(), dex_pc, arena, 0, kArenaAllocCHA) { + HShouldDeoptimizeFlag(ArenaAllocator* allocator, uint32_t dex_pc) + : HVariableInputSizeInstruction(SideEffects::None(), dex_pc, allocator, 0, kArenaAllocCHA) { } DataType::Type GetType() const OVERRIDE { return DataType::Type::kInt32; } @@ -4076,7 +4089,7 @@ class HInvoke : public HVariableInputSizeInstruction { using InvokeTypeField = BitField<InvokeType, kFieldInvokeType, kFieldInvokeTypeSize>; using ReturnTypeField = BitField<DataType::Type, kFieldReturnType, kFieldReturnTypeSize>; - HInvoke(ArenaAllocator* arena, + HInvoke(ArenaAllocator* allocator, uint32_t number_of_arguments, uint32_t number_of_other_inputs, DataType::Type return_type, @@ -4087,7 +4100,7 @@ class HInvoke : public HVariableInputSizeInstruction { : HVariableInputSizeInstruction( SideEffects::AllExceptGCDependency(), // Assume write/read on all fields/arrays. 
dex_pc, - arena, + allocator, number_of_arguments + number_of_other_inputs, kArenaAllocInvokeInputs), number_of_arguments_(number_of_arguments), @@ -4114,13 +4127,13 @@ class HInvoke : public HVariableInputSizeInstruction { class HInvokeUnresolved FINAL : public HInvoke { public: - HInvokeUnresolved(ArenaAllocator* arena, + HInvokeUnresolved(ArenaAllocator* allocator, uint32_t number_of_arguments, DataType::Type return_type, uint32_t dex_pc, uint32_t dex_method_index, InvokeType invoke_type) - : HInvoke(arena, + : HInvoke(allocator, number_of_arguments, 0u /* number_of_other_inputs */, return_type, @@ -4138,12 +4151,12 @@ class HInvokeUnresolved FINAL : public HInvoke { class HInvokePolymorphic FINAL : public HInvoke { public: - HInvokePolymorphic(ArenaAllocator* arena, + HInvokePolymorphic(ArenaAllocator* allocator, uint32_t number_of_arguments, DataType::Type return_type, uint32_t dex_pc, uint32_t dex_method_index) - : HInvoke(arena, + : HInvoke(allocator, number_of_arguments, 0u /* number_of_other_inputs */, return_type, @@ -4215,7 +4228,7 @@ class HInvokeStaticOrDirect FINAL : public HInvoke { uint64_t method_load_data; }; - HInvokeStaticOrDirect(ArenaAllocator* arena, + HInvokeStaticOrDirect(ArenaAllocator* allocator, uint32_t number_of_arguments, DataType::Type return_type, uint32_t dex_pc, @@ -4225,7 +4238,7 @@ class HInvokeStaticOrDirect FINAL : public HInvoke { InvokeType invoke_type, MethodReference target_method, ClinitCheckRequirement clinit_check_requirement) - : HInvoke(arena, + : HInvoke(allocator, number_of_arguments, // There is potentially one extra argument for the HCurrentMethod node, and // potentially one other if the clinit check is explicit, and potentially @@ -4410,14 +4423,14 @@ std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::ClinitCheckReq class HInvokeVirtual FINAL : public HInvoke { public: - HInvokeVirtual(ArenaAllocator* arena, + HInvokeVirtual(ArenaAllocator* allocator, uint32_t number_of_arguments, DataType::Type 
return_type, uint32_t dex_pc, uint32_t dex_method_index, ArtMethod* resolved_method, uint32_t vtable_index) - : HInvoke(arena, + : HInvoke(allocator, number_of_arguments, 0u, return_type, @@ -4458,14 +4471,14 @@ class HInvokeVirtual FINAL : public HInvoke { class HInvokeInterface FINAL : public HInvoke { public: - HInvokeInterface(ArenaAllocator* arena, + HInvokeInterface(ArenaAllocator* allocator, uint32_t number_of_arguments, DataType::Type return_type, uint32_t dex_pc, uint32_t dex_method_index, ArtMethod* resolved_method, uint32_t imt_index) - : HInvoke(arena, + : HInvoke(allocator, number_of_arguments, 0u, return_type, @@ -6637,7 +6650,7 @@ class HConstructorFence FINAL : public HVariableInputSizeInstruction { // about the associated object. HConstructorFence(HInstruction* fence_object, uint32_t dex_pc, - ArenaAllocator* arena) + ArenaAllocator* allocator) // We strongly suspect there is not a more accurate way to describe the fine-grained reordering // constraints described in the class header. We claim that these SideEffects constraints // enforce a superset of the real constraints. @@ -6661,7 +6674,7 @@ class HConstructorFence FINAL : public HVariableInputSizeInstruction { // we can refine the side effect to a smaller set of type reads (see above constraints). 
: HVariableInputSizeInstruction(SideEffects::AllReads(), dex_pc, - arena, + allocator, /* number_of_inputs */ 1, kArenaAllocConstructorFenceInputs) { DCHECK(fence_object != nullptr); @@ -6878,9 +6891,9 @@ static constexpr size_t kDefaultNumberOfMoves = 4; class HParallelMove FINAL : public HTemplateInstruction<0> { public: - explicit HParallelMove(ArenaAllocator* arena, uint32_t dex_pc = kNoDexPc) + explicit HParallelMove(ArenaAllocator* allocator, uint32_t dex_pc = kNoDexPc) : HTemplateInstruction(SideEffects::None(), dex_pc), - moves_(arena->Adapter(kArenaAllocMoveOperands)) { + moves_(allocator->Adapter(kArenaAllocMoveOperands)) { moves_.reserve(kDefaultNumberOfMoves); } diff --git a/compiler/optimizing/nodes_test.cc b/compiler/optimizing/nodes_test.cc index ada6177bfb..b2180d9f98 100644 --- a/compiler/optimizing/nodes_test.cc +++ b/compiler/optimizing/nodes_test.cc @@ -23,37 +23,36 @@ namespace art { +class NodeTest : public OptimizingUnitTest {}; + /** * Test that removing instruction from the graph removes itself from user lists * and environment lists. 
*/ -TEST(Node, RemoveInstruction) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - - HGraph* graph = CreateGraph(&allocator); - HBasicBlock* entry = new (&allocator) HBasicBlock(graph); +TEST_F(NodeTest, RemoveInstruction) { + HGraph* graph = CreateGraph(); + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(entry); graph->SetEntryBlock(entry); - HInstruction* parameter = new (&allocator) HParameterValue( + HInstruction* parameter = new (GetAllocator()) HParameterValue( graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); entry->AddInstruction(parameter); - entry->AddInstruction(new (&allocator) HGoto()); + entry->AddInstruction(new (GetAllocator()) HGoto()); - HBasicBlock* first_block = new (&allocator) HBasicBlock(graph); + HBasicBlock* first_block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(first_block); entry->AddSuccessor(first_block); - HInstruction* null_check = new (&allocator) HNullCheck(parameter, 0); + HInstruction* null_check = new (GetAllocator()) HNullCheck(parameter, 0); first_block->AddInstruction(null_check); - first_block->AddInstruction(new (&allocator) HReturnVoid()); + first_block->AddInstruction(new (GetAllocator()) HReturnVoid()); - HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph); + HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(exit_block); first_block->AddSuccessor(exit_block); - exit_block->AddInstruction(new (&allocator) HExit()); + exit_block->AddInstruction(new (GetAllocator()) HExit()); - HEnvironment* environment = new (&allocator) HEnvironment( - &allocator, 1, graph->GetArtMethod(), 0, null_check); + HEnvironment* environment = new (GetAllocator()) HEnvironment( + GetAllocator(), 1, graph->GetArtMethod(), 0, null_check); null_check->SetRawEnvironment(environment); environment->SetRawEnvAt(0, parameter); parameter->AddEnvUseAt(null_check->GetEnvironment(), 0); @@ -70,25 +69,22 @@ TEST(Node, RemoveInstruction) { /** 
* Test that inserting an instruction in the graph updates user lists. */ -TEST(Node, InsertInstruction) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - - HGraph* graph = CreateGraph(&allocator); - HBasicBlock* entry = new (&allocator) HBasicBlock(graph); +TEST_F(NodeTest, InsertInstruction) { + HGraph* graph = CreateGraph(); + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(entry); graph->SetEntryBlock(entry); - HInstruction* parameter1 = new (&allocator) HParameterValue( + HInstruction* parameter1 = new (GetAllocator()) HParameterValue( graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); - HInstruction* parameter2 = new (&allocator) HParameterValue( + HInstruction* parameter2 = new (GetAllocator()) HParameterValue( graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); entry->AddInstruction(parameter1); entry->AddInstruction(parameter2); - entry->AddInstruction(new (&allocator) HExit()); + entry->AddInstruction(new (GetAllocator()) HExit()); ASSERT_FALSE(parameter1->HasUses()); - HInstruction* to_insert = new (&allocator) HNullCheck(parameter1, 0); + HInstruction* to_insert = new (GetAllocator()) HNullCheck(parameter1, 0); entry->InsertInstructionBefore(to_insert, parameter2); ASSERT_TRUE(parameter1->HasUses()); @@ -98,48 +94,42 @@ TEST(Node, InsertInstruction) { /** * Test that adding an instruction in the graph updates user lists. 
*/ -TEST(Node, AddInstruction) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - - HGraph* graph = CreateGraph(&allocator); - HBasicBlock* entry = new (&allocator) HBasicBlock(graph); +TEST_F(NodeTest, AddInstruction) { + HGraph* graph = CreateGraph(); + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(entry); graph->SetEntryBlock(entry); - HInstruction* parameter = new (&allocator) HParameterValue( + HInstruction* parameter = new (GetAllocator()) HParameterValue( graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); entry->AddInstruction(parameter); ASSERT_FALSE(parameter->HasUses()); - HInstruction* to_add = new (&allocator) HNullCheck(parameter, 0); + HInstruction* to_add = new (GetAllocator()) HNullCheck(parameter, 0); entry->AddInstruction(to_add); ASSERT_TRUE(parameter->HasUses()); ASSERT_TRUE(parameter->GetUses().HasExactlyOneElement()); } -TEST(Node, ParentEnvironment) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - - HGraph* graph = CreateGraph(&allocator); - HBasicBlock* entry = new (&allocator) HBasicBlock(graph); +TEST_F(NodeTest, ParentEnvironment) { + HGraph* graph = CreateGraph(); + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(entry); graph->SetEntryBlock(entry); - HInstruction* parameter1 = new (&allocator) HParameterValue( + HInstruction* parameter1 = new (GetAllocator()) HParameterValue( graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); - HInstruction* with_environment = new (&allocator) HNullCheck(parameter1, 0); + HInstruction* with_environment = new (GetAllocator()) HNullCheck(parameter1, 0); entry->AddInstruction(parameter1); entry->AddInstruction(with_environment); - entry->AddInstruction(new (&allocator) HExit()); + entry->AddInstruction(new (GetAllocator()) HExit()); ASSERT_TRUE(parameter1->HasUses()); ASSERT_TRUE(parameter1->GetUses().HasExactlyOneElement()); - HEnvironment* environment = new (&allocator) HEnvironment( - 
&allocator, 1, graph->GetArtMethod(), 0, with_environment); - ArenaVector<HInstruction*> array(allocator.Adapter()); + HEnvironment* environment = new (GetAllocator()) HEnvironment( + GetAllocator(), 1, graph->GetArtMethod(), 0, with_environment); + ArenaVector<HInstruction*> array(GetAllocator()->Adapter()); array.push_back(parameter1); environment->CopyFrom(array); @@ -148,22 +138,22 @@ TEST(Node, ParentEnvironment) { ASSERT_TRUE(parameter1->HasEnvironmentUses()); ASSERT_TRUE(parameter1->GetEnvUses().HasExactlyOneElement()); - HEnvironment* parent1 = new (&allocator) HEnvironment( - &allocator, 1, graph->GetArtMethod(), 0, nullptr); + HEnvironment* parent1 = new (GetAllocator()) HEnvironment( + GetAllocator(), 1, graph->GetArtMethod(), 0, nullptr); parent1->CopyFrom(array); ASSERT_EQ(parameter1->GetEnvUses().SizeSlow(), 2u); - HEnvironment* parent2 = new (&allocator) HEnvironment( - &allocator, 1, graph->GetArtMethod(), 0, nullptr); + HEnvironment* parent2 = new (GetAllocator()) HEnvironment( + GetAllocator(), 1, graph->GetArtMethod(), 0, nullptr); parent2->CopyFrom(array); - parent1->SetAndCopyParentChain(&allocator, parent2); + parent1->SetAndCopyParentChain(GetAllocator(), parent2); // One use for parent2, and one other use for the new parent of parent1. ASSERT_EQ(parameter1->GetEnvUses().SizeSlow(), 4u); // We have copied the parent chain. So we now have two more uses. - environment->SetAndCopyParentChain(&allocator, parent1); + environment->SetAndCopyParentChain(GetAllocator(), parent1); ASSERT_EQ(parameter1->GetEnvUses().SizeSlow(), 6u); } diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h index 8f3ab11d00..781a59f661 100644 --- a/compiler/optimizing/nodes_vector.h +++ b/compiler/optimizing/nodes_vector.h @@ -71,7 +71,7 @@ class HVecOperation : public HVariableInputSizeInstruction { // TODO: we could introduce SIMD types in HIR. 
static constexpr DataType::Type kSIMDType = DataType::Type::kFloat64; - HVecOperation(ArenaAllocator* arena, + HVecOperation(ArenaAllocator* allocator, DataType::Type packed_type, SideEffects side_effects, size_t number_of_inputs, @@ -79,7 +79,7 @@ class HVecOperation : public HVariableInputSizeInstruction { uint32_t dex_pc) : HVariableInputSizeInstruction(side_effects, dex_pc, - arena, + allocator, number_of_inputs, kArenaAllocVectorNode), vector_length_(vector_length) { @@ -156,12 +156,12 @@ class HVecOperation : public HVariableInputSizeInstruction { // Abstraction of a unary vector operation. class HVecUnaryOperation : public HVecOperation { public: - HVecUnaryOperation(ArenaAllocator* arena, + HVecUnaryOperation(ArenaAllocator* allocator, HInstruction* input, DataType::Type packed_type, size_t vector_length, uint32_t dex_pc) - : HVecOperation(arena, + : HVecOperation(allocator, packed_type, SideEffects::None(), /* number_of_inputs */ 1, @@ -181,13 +181,13 @@ class HVecUnaryOperation : public HVecOperation { // Abstraction of a binary vector operation. class HVecBinaryOperation : public HVecOperation { public: - HVecBinaryOperation(ArenaAllocator* arena, + HVecBinaryOperation(ArenaAllocator* allocator, HInstruction* left, HInstruction* right, DataType::Type packed_type, size_t vector_length, uint32_t dex_pc) - : HVecOperation(arena, + : HVecOperation(allocator, packed_type, SideEffects::None(), /* number_of_inputs */ 2, @@ -210,13 +210,18 @@ class HVecBinaryOperation : public HVecOperation { // The Android runtime guarantees elements have at least natural alignment. 
class HVecMemoryOperation : public HVecOperation { public: - HVecMemoryOperation(ArenaAllocator* arena, + HVecMemoryOperation(ArenaAllocator* allocator, DataType::Type packed_type, SideEffects side_effects, size_t number_of_inputs, size_t vector_length, uint32_t dex_pc) - : HVecOperation(arena, packed_type, side_effects, number_of_inputs, vector_length, dex_pc), + : HVecOperation(allocator, + packed_type, + side_effects, + number_of_inputs, + vector_length, + dex_pc), alignment_(DataType::Size(packed_type), 0) { DCHECK_GE(number_of_inputs, 2u); } @@ -260,12 +265,12 @@ inline static bool HasConsistentPackedTypes(HInstruction* input, DataType::Type // viz. replicate(x) = [ x, .. , x ]. class HVecReplicateScalar FINAL : public HVecUnaryOperation { public: - HVecReplicateScalar(ArenaAllocator* arena, + HVecReplicateScalar(ArenaAllocator* allocator, HInstruction* scalar, DataType::Type packed_type, size_t vector_length, uint32_t dex_pc) - : HVecUnaryOperation(arena, scalar, packed_type, vector_length, dex_pc) { + : HVecUnaryOperation(allocator, scalar, packed_type, vector_length, dex_pc) { DCHECK(!scalar->IsVecOperation()); } @@ -285,13 +290,13 @@ class HVecReplicateScalar FINAL : public HVecUnaryOperation { // TODO: for now only i == 1 case supported. 
class HVecExtractScalar FINAL : public HVecUnaryOperation { public: - HVecExtractScalar(ArenaAllocator* arena, + HVecExtractScalar(ArenaAllocator* allocator, HInstruction* input, DataType::Type packed_type, size_t vector_length, size_t index, uint32_t dex_pc) - : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) { + : HVecUnaryOperation(allocator, input, packed_type, vector_length, dex_pc) { DCHECK(HasConsistentPackedTypes(input, packed_type)); DCHECK_LT(index, vector_length); DCHECK_EQ(index, 0u); @@ -323,13 +328,13 @@ class HVecReduce FINAL : public HVecUnaryOperation { kMax = 3 }; - HVecReduce(ArenaAllocator* arena, + HVecReduce(ArenaAllocator* allocator, HInstruction* input, DataType::Type packed_type, size_t vector_length, ReductionKind kind, uint32_t dex_pc) - : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc), + : HVecUnaryOperation(allocator, input, packed_type, vector_length, dex_pc), kind_(kind) { DCHECK(HasConsistentPackedTypes(input, packed_type)); } @@ -356,12 +361,12 @@ class HVecReduce FINAL : public HVecUnaryOperation { // viz. cnv[ x1, .. , xn ] = [ cnv(x1), .. , cnv(xn) ]. class HVecCnv FINAL : public HVecUnaryOperation { public: - HVecCnv(ArenaAllocator* arena, + HVecCnv(ArenaAllocator* allocator, HInstruction* input, DataType::Type packed_type, size_t vector_length, uint32_t dex_pc) - : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) { + : HVecUnaryOperation(allocator, input, packed_type, vector_length, dex_pc) { DCHECK(input->IsVecOperation()); DCHECK_NE(GetInputType(), GetResultType()); // actual convert } @@ -381,12 +386,12 @@ class HVecCnv FINAL : public HVecUnaryOperation { // viz. neg[ x1, .. , xn ] = [ -x1, .. , -xn ]. 
class HVecNeg FINAL : public HVecUnaryOperation { public: - HVecNeg(ArenaAllocator* arena, + HVecNeg(ArenaAllocator* allocator, HInstruction* input, DataType::Type packed_type, size_t vector_length, uint32_t dex_pc) - : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) { + : HVecUnaryOperation(allocator, input, packed_type, vector_length, dex_pc) { DCHECK(HasConsistentPackedTypes(input, packed_type)); } @@ -403,12 +408,12 @@ class HVecNeg FINAL : public HVecUnaryOperation { // for signed operand x. class HVecAbs FINAL : public HVecUnaryOperation { public: - HVecAbs(ArenaAllocator* arena, + HVecAbs(ArenaAllocator* allocator, HInstruction* input, DataType::Type packed_type, size_t vector_length, uint32_t dex_pc) - : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) { + : HVecUnaryOperation(allocator, input, packed_type, vector_length, dex_pc) { DCHECK(HasConsistentPackedTypes(input, packed_type)); } @@ -425,12 +430,12 @@ class HVecAbs FINAL : public HVecUnaryOperation { // not[ x1, .. , xn ] = [ !x1, .. , !xn ] for boolean. class HVecNot FINAL : public HVecUnaryOperation { public: - HVecNot(ArenaAllocator* arena, + HVecNot(ArenaAllocator* allocator, HInstruction* input, DataType::Type packed_type, size_t vector_length, uint32_t dex_pc) - : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) { + : HVecUnaryOperation(allocator, input, packed_type, vector_length, dex_pc) { DCHECK(input->IsVecOperation()); } @@ -450,13 +455,13 @@ class HVecNot FINAL : public HVecUnaryOperation { // viz. [ x1, .. , xn ] + [ y1, .. , yn ] = [ x1 + y1, .. , xn + yn ]. 
class HVecAdd FINAL : public HVecBinaryOperation { public: - HVecAdd(ArenaAllocator* arena, + HVecAdd(ArenaAllocator* allocator, HInstruction* left, HInstruction* right, DataType::Type packed_type, size_t vector_length, uint32_t dex_pc) - : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) { + : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) { DCHECK(HasConsistentPackedTypes(left, packed_type)); DCHECK(HasConsistentPackedTypes(right, packed_type)); } @@ -475,7 +480,7 @@ class HVecAdd FINAL : public HVecBinaryOperation { // for either both signed or both unsigned operands x, y. class HVecHalvingAdd FINAL : public HVecBinaryOperation { public: - HVecHalvingAdd(ArenaAllocator* arena, + HVecHalvingAdd(ArenaAllocator* allocator, HInstruction* left, HInstruction* right, DataType::Type packed_type, @@ -483,7 +488,7 @@ class HVecHalvingAdd FINAL : public HVecBinaryOperation { bool is_rounded, bool is_unsigned, uint32_t dex_pc) - : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) { + : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) { // The `is_unsigned` flag should be used exclusively with the Int32 or Int64. // This flag is a temporary measure while we do not have the Uint32 and Uint64 data types. DCHECK(!is_unsigned || @@ -524,13 +529,13 @@ class HVecHalvingAdd FINAL : public HVecBinaryOperation { // viz. [ x1, .. , xn ] - [ y1, .. , yn ] = [ x1 - y1, .. , xn - yn ]. 
class HVecSub FINAL : public HVecBinaryOperation { public: - HVecSub(ArenaAllocator* arena, + HVecSub(ArenaAllocator* allocator, HInstruction* left, HInstruction* right, DataType::Type packed_type, size_t vector_length, uint32_t dex_pc) - : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) { + : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) { DCHECK(HasConsistentPackedTypes(left, packed_type)); DCHECK(HasConsistentPackedTypes(right, packed_type)); } @@ -547,13 +552,13 @@ class HVecSub FINAL : public HVecBinaryOperation { // viz. [ x1, .. , xn ] * [ y1, .. , yn ] = [ x1 * y1, .. , xn * yn ]. class HVecMul FINAL : public HVecBinaryOperation { public: - HVecMul(ArenaAllocator* arena, + HVecMul(ArenaAllocator* allocator, HInstruction* left, HInstruction* right, DataType::Type packed_type, size_t vector_length, uint32_t dex_pc) - : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) { + : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) { DCHECK(HasConsistentPackedTypes(left, packed_type)); DCHECK(HasConsistentPackedTypes(right, packed_type)); } @@ -570,13 +575,13 @@ class HVecMul FINAL : public HVecBinaryOperation { // viz. [ x1, .. , xn ] / [ y1, .. , yn ] = [ x1 / y1, .. , xn / yn ]. class HVecDiv FINAL : public HVecBinaryOperation { public: - HVecDiv(ArenaAllocator* arena, + HVecDiv(ArenaAllocator* allocator, HInstruction* left, HInstruction* right, DataType::Type packed_type, size_t vector_length, uint32_t dex_pc) - : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) { + : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) { DCHECK(HasConsistentPackedTypes(left, packed_type)); DCHECK(HasConsistentPackedTypes(right, packed_type)); } @@ -594,14 +599,14 @@ class HVecDiv FINAL : public HVecBinaryOperation { // for either both signed or both unsigned operands x, y. 
class HVecMin FINAL : public HVecBinaryOperation { public: - HVecMin(ArenaAllocator* arena, + HVecMin(ArenaAllocator* allocator, HInstruction* left, HInstruction* right, DataType::Type packed_type, size_t vector_length, bool is_unsigned, uint32_t dex_pc) - : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) { + : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) { // The `is_unsigned` flag should be used exclusively with the Int32 or Int64. // This flag is a temporary measure while we do not have the Uint32 and Uint64 data types. DCHECK(!is_unsigned || @@ -638,14 +643,14 @@ class HVecMin FINAL : public HVecBinaryOperation { // for either both signed or both unsigned operands x, y. class HVecMax FINAL : public HVecBinaryOperation { public: - HVecMax(ArenaAllocator* arena, + HVecMax(ArenaAllocator* allocator, HInstruction* left, HInstruction* right, DataType::Type packed_type, size_t vector_length, bool is_unsigned, uint32_t dex_pc) - : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) { + : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) { // The `is_unsigned` flag should be used exclusively with the Int32 or Int64. // This flag is a temporary measure while we do not have the Uint32 and Uint64 data types. DCHECK(!is_unsigned || @@ -681,13 +686,13 @@ class HVecMax FINAL : public HVecBinaryOperation { // viz. [ x1, .. , xn ] & [ y1, .. , yn ] = [ x1 & y1, .. , xn & yn ]. 
class HVecAnd FINAL : public HVecBinaryOperation { public: - HVecAnd(ArenaAllocator* arena, + HVecAnd(ArenaAllocator* allocator, HInstruction* left, HInstruction* right, DataType::Type packed_type, size_t vector_length, uint32_t dex_pc) - : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) { + : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) { DCHECK(left->IsVecOperation() && right->IsVecOperation()); } @@ -703,13 +708,13 @@ class HVecAnd FINAL : public HVecBinaryOperation { // viz. [ x1, .. , xn ] and-not [ y1, .. , yn ] = [ ~x1 & y1, .. , ~xn & yn ]. class HVecAndNot FINAL : public HVecBinaryOperation { public: - HVecAndNot(ArenaAllocator* arena, + HVecAndNot(ArenaAllocator* allocator, HInstruction* left, HInstruction* right, DataType::Type packed_type, size_t vector_length, uint32_t dex_pc) - : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) { + : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) { DCHECK(left->IsVecOperation() && right->IsVecOperation()); } @@ -725,13 +730,13 @@ class HVecAndNot FINAL : public HVecBinaryOperation { // viz. [ x1, .. , xn ] | [ y1, .. , yn ] = [ x1 | y1, .. , xn | yn ]. class HVecOr FINAL : public HVecBinaryOperation { public: - HVecOr(ArenaAllocator* arena, + HVecOr(ArenaAllocator* allocator, HInstruction* left, HInstruction* right, DataType::Type packed_type, size_t vector_length, uint32_t dex_pc) - : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) { + : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) { DCHECK(left->IsVecOperation() && right->IsVecOperation()); } @@ -747,13 +752,13 @@ class HVecOr FINAL : public HVecBinaryOperation { // viz. [ x1, .. , xn ] ^ [ y1, .. , yn ] = [ x1 ^ y1, .. , xn ^ yn ]. 
class HVecXor FINAL : public HVecBinaryOperation { public: - HVecXor(ArenaAllocator* arena, + HVecXor(ArenaAllocator* allocator, HInstruction* left, HInstruction* right, DataType::Type packed_type, size_t vector_length, uint32_t dex_pc) - : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) { + : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) { DCHECK(left->IsVecOperation() && right->IsVecOperation()); } @@ -769,13 +774,13 @@ class HVecXor FINAL : public HVecBinaryOperation { // viz. [ x1, .. , xn ] << d = [ x1 << d, .. , xn << d ]. class HVecShl FINAL : public HVecBinaryOperation { public: - HVecShl(ArenaAllocator* arena, + HVecShl(ArenaAllocator* allocator, HInstruction* left, HInstruction* right, DataType::Type packed_type, size_t vector_length, uint32_t dex_pc) - : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) { + : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) { DCHECK(HasConsistentPackedTypes(left, packed_type)); } @@ -791,13 +796,13 @@ class HVecShl FINAL : public HVecBinaryOperation { // viz. [ x1, .. , xn ] >> d = [ x1 >> d, .. , xn >> d ]. class HVecShr FINAL : public HVecBinaryOperation { public: - HVecShr(ArenaAllocator* arena, + HVecShr(ArenaAllocator* allocator, HInstruction* left, HInstruction* right, DataType::Type packed_type, size_t vector_length, uint32_t dex_pc) - : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) { + : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) { DCHECK(HasConsistentPackedTypes(left, packed_type)); } @@ -813,13 +818,13 @@ class HVecShr FINAL : public HVecBinaryOperation { // viz. [ x1, .. , xn ] >>> d = [ x1 >>> d, .. , xn >>> d ]. 
class HVecUShr FINAL : public HVecBinaryOperation { public: - HVecUShr(ArenaAllocator* arena, + HVecUShr(ArenaAllocator* allocator, HInstruction* left, HInstruction* right, DataType::Type packed_type, size_t vector_length, uint32_t dex_pc) - : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) { + : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) { DCHECK(HasConsistentPackedTypes(left, packed_type)); } @@ -840,13 +845,13 @@ class HVecUShr FINAL : public HVecBinaryOperation { // set( array(x1, .. , xm) ) = [ x1, .. , xm, 0, .. , 0 ] if m < n. class HVecSetScalars FINAL : public HVecOperation { public: - HVecSetScalars(ArenaAllocator* arena, + HVecSetScalars(ArenaAllocator* allocator, HInstruction* scalars[], DataType::Type packed_type, size_t vector_length, size_t number_of_scalars, uint32_t dex_pc) - : HVecOperation(arena, + : HVecOperation(allocator, packed_type, SideEffects::None(), number_of_scalars, @@ -872,7 +877,7 @@ class HVecSetScalars FINAL : public HVecOperation { // viz. [ a1, .. , an ] + [ x1, .. , xn ] * [ y1, .. , yn ] = [ a1 + x1 * y1, .. , an + xn * yn ]. class HVecMultiplyAccumulate FINAL : public HVecOperation { public: - HVecMultiplyAccumulate(ArenaAllocator* arena, + HVecMultiplyAccumulate(ArenaAllocator* allocator, InstructionKind op, HInstruction* accumulator, HInstruction* mul_left, @@ -880,7 +885,7 @@ class HVecMultiplyAccumulate FINAL : public HVecOperation { DataType::Type packed_type, size_t vector_length, uint32_t dex_pc) - : HVecOperation(arena, + : HVecOperation(allocator, packed_type, SideEffects::None(), /* number_of_inputs */ 3, @@ -922,14 +927,14 @@ class HVecMultiplyAccumulate FINAL : public HVecOperation { // for m <= n, non-overlapping sums, and signed operands x, y. 
class HVecSADAccumulate FINAL : public HVecOperation { public: - HVecSADAccumulate(ArenaAllocator* arena, + HVecSADAccumulate(ArenaAllocator* allocator, HInstruction* accumulator, HInstruction* sad_left, HInstruction* sad_right, DataType::Type packed_type, size_t vector_length, uint32_t dex_pc) - : HVecOperation(arena, + : HVecOperation(allocator, packed_type, SideEffects::None(), /* number_of_inputs */ 3, @@ -955,7 +960,7 @@ class HVecSADAccumulate FINAL : public HVecOperation { // yield the vector [ mem(1), .. , mem(n) ]. class HVecLoad FINAL : public HVecMemoryOperation { public: - HVecLoad(ArenaAllocator* arena, + HVecLoad(ArenaAllocator* allocator, HInstruction* base, HInstruction* index, DataType::Type packed_type, @@ -963,7 +968,7 @@ class HVecLoad FINAL : public HVecMemoryOperation { size_t vector_length, bool is_string_char_at, uint32_t dex_pc) - : HVecMemoryOperation(arena, + : HVecMemoryOperation(allocator, packed_type, side_effects, /* number_of_inputs */ 2, @@ -999,7 +1004,7 @@ class HVecLoad FINAL : public HVecMemoryOperation { // sets mem(1) = x1, .. , mem(n) = xn. class HVecStore FINAL : public HVecMemoryOperation { public: - HVecStore(ArenaAllocator* arena, + HVecStore(ArenaAllocator* allocator, HInstruction* base, HInstruction* index, HInstruction* value, @@ -1007,7 +1012,7 @@ class HVecStore FINAL : public HVecMemoryOperation { SideEffects side_effects, size_t vector_length, uint32_t dex_pc) - : HVecMemoryOperation(arena, + : HVecMemoryOperation(allocator, packed_type, side_effects, /* number_of_inputs */ 3, diff --git a/compiler/optimizing/nodes_vector_test.cc b/compiler/optimizing/nodes_vector_test.cc index d3a499cb31..ab9d7594d9 100644 --- a/compiler/optimizing/nodes_vector_test.cc +++ b/compiler/optimizing/nodes_vector_test.cc @@ -23,12 +23,10 @@ namespace art { /** * Fixture class for testing vector nodes. 
*/ -class NodesVectorTest : public CommonCompilerTest { +class NodesVectorTest : public OptimizingUnitTest { public: NodesVectorTest() - : pool_(), - allocator_(&pool_), - graph_(CreateGraph(&allocator_)) { + : graph_(CreateGraph()) { BuildGraph(); } @@ -36,32 +34,30 @@ class NodesVectorTest : public CommonCompilerTest { void BuildGraph() { graph_->SetNumberOfVRegs(1); - entry_block_ = new (&allocator_) HBasicBlock(graph_); - exit_block_ = new (&allocator_) HBasicBlock(graph_); + entry_block_ = new (GetAllocator()) HBasicBlock(graph_); + exit_block_ = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(entry_block_); graph_->AddBlock(exit_block_); graph_->SetEntryBlock(entry_block_); graph_->SetExitBlock(exit_block_); - int8_parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(), - dex::TypeIndex(1), - 0, - DataType::Type::kInt8); + int8_parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(), + dex::TypeIndex(1), + 0, + DataType::Type::kInt8); entry_block_->AddInstruction(int8_parameter_); - int16_parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(), - dex::TypeIndex(2), - 0, - DataType::Type::kInt16); + int16_parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(), + dex::TypeIndex(2), + 0, + DataType::Type::kInt16); entry_block_->AddInstruction(int16_parameter_); - int32_parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(), - dex::TypeIndex(0), - 0, - DataType::Type::kInt32); + int32_parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(), + dex::TypeIndex(0), + 0, + DataType::Type::kInt32); entry_block_->AddInstruction(int32_parameter_); } // General building fields. 
- ArenaPool pool_; - ArenaAllocator allocator_; HGraph* graph_; HBasicBlock* entry_block_; @@ -134,16 +130,16 @@ TEST(NodesVector, AlignmentString) { } TEST_F(NodesVectorTest, VectorOperationProperties) { - HVecOperation* v0 = new (&allocator_) - HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc); - HVecOperation* v1 = new (&allocator_) - HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc); - HVecOperation* v2 = new (&allocator_) - HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 2, kNoDexPc); - HVecOperation* v3 = new (&allocator_) - HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt16, 4, kNoDexPc); - HVecOperation* v4 = new (&allocator_) HVecStore( - &allocator_, + HVecOperation* v0 = new (GetAllocator()) + HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc); + HVecOperation* v1 = new (GetAllocator()) + HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc); + HVecOperation* v2 = new (GetAllocator()) + HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 2, kNoDexPc); + HVecOperation* v3 = new (GetAllocator()) + HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt16, 4, kNoDexPc); + HVecOperation* v4 = new (GetAllocator()) HVecStore( + GetAllocator(), int32_parameter_, int32_parameter_, v0, @@ -198,30 +194,30 @@ TEST_F(NodesVectorTest, VectorOperationProperties) { } TEST_F(NodesVectorTest, VectorAlignmentAndStringCharAtMatterOnLoad) { - HVecLoad* v0 = new (&allocator_) HVecLoad(&allocator_, - int32_parameter_, - int32_parameter_, - DataType::Type::kInt32, - SideEffects::ArrayReadOfType(DataType::Type::kInt32), - 4, - /*is_string_char_at*/ false, - kNoDexPc); - HVecLoad* v1 = new (&allocator_) HVecLoad(&allocator_, - int32_parameter_, - int32_parameter_, - DataType::Type::kInt32, - 
SideEffects::ArrayReadOfType(DataType::Type::kInt32), - 4, - /*is_string_char_at*/ false, - kNoDexPc); - HVecLoad* v2 = new (&allocator_) HVecLoad(&allocator_, - int32_parameter_, - int32_parameter_, - DataType::Type::kInt32, - SideEffects::ArrayReadOfType(DataType::Type::kInt32), - 4, - /*is_string_char_at*/ true, - kNoDexPc); + HVecLoad* v0 = new (GetAllocator()) HVecLoad(GetAllocator(), + int32_parameter_, + int32_parameter_, + DataType::Type::kInt32, + SideEffects::ArrayReadOfType(DataType::Type::kInt32), + 4, + /*is_string_char_at*/ false, + kNoDexPc); + HVecLoad* v1 = new (GetAllocator()) HVecLoad(GetAllocator(), + int32_parameter_, + int32_parameter_, + DataType::Type::kInt32, + SideEffects::ArrayReadOfType(DataType::Type::kInt32), + 4, + /*is_string_char_at*/ false, + kNoDexPc); + HVecLoad* v2 = new (GetAllocator()) HVecLoad(GetAllocator(), + int32_parameter_, + int32_parameter_, + DataType::Type::kInt32, + SideEffects::ArrayReadOfType(DataType::Type::kInt32), + 4, + /*is_string_char_at*/ true, + kNoDexPc); EXPECT_TRUE(v0->CanBeMoved()); EXPECT_TRUE(v1->CanBeMoved()); @@ -250,10 +246,10 @@ TEST_F(NodesVectorTest, VectorAlignmentAndStringCharAtMatterOnLoad) { } TEST_F(NodesVectorTest, VectorAlignmentMattersOnStore) { - HVecOperation* p0 = new (&allocator_) - HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc); - HVecStore* v0 = new (&allocator_) HVecStore( - &allocator_, + HVecOperation* p0 = new (GetAllocator()) + HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc); + HVecStore* v0 = new (GetAllocator()) HVecStore( + GetAllocator(), int32_parameter_, int32_parameter_, p0, @@ -261,8 +257,8 @@ TEST_F(NodesVectorTest, VectorAlignmentMattersOnStore) { SideEffects::ArrayWriteOfType(DataType::Type::kInt32), 4, kNoDexPc); - HVecStore* v1 = new (&allocator_) HVecStore( - &allocator_, + HVecStore* v1 = new (GetAllocator()) HVecStore( + GetAllocator(), int32_parameter_, int32_parameter_, p0, 
@@ -287,27 +283,27 @@ TEST_F(NodesVectorTest, VectorAlignmentMattersOnStore) { } TEST_F(NodesVectorTest, VectorSignMattersOnMin) { - HVecOperation* p0 = new (&allocator_) - HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc); - HVecOperation* p1 = new (&allocator_) - HVecReplicateScalar(&allocator_, int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc); - HVecOperation* p2 = new (&allocator_) - HVecReplicateScalar(&allocator_, int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc); - - HVecMin* v0 = new (&allocator_) HVecMin( - &allocator_, p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, kNoDexPc); - HVecMin* v1 = new (&allocator_) HVecMin( - &allocator_, p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, kNoDexPc); - HVecMin* v2 = new (&allocator_) HVecMin( - &allocator_, p0, p0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, kNoDexPc); - HVecMin* v3 = new (&allocator_) HVecMin( - &allocator_, p1, p1, DataType::Type::kUint8, 16, /*is_unsigned*/ false, kNoDexPc); - HVecMin* v4 = new (&allocator_) HVecMin( - &allocator_, p1, p1, DataType::Type::kInt8, 16, /*is_unsigned*/ false, kNoDexPc); - HVecMin* v5 = new (&allocator_) HVecMin( - &allocator_, p2, p2, DataType::Type::kUint16, 8, /*is_unsigned*/ false, kNoDexPc); - HVecMin* v6 = new (&allocator_) HVecMin( - &allocator_, p2, p2, DataType::Type::kInt16, 8, /*is_unsigned*/ false, kNoDexPc); + HVecOperation* p0 = new (GetAllocator()) + HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc); + HVecOperation* p1 = new (GetAllocator()) + HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc); + HVecOperation* p2 = new (GetAllocator()) + HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc); + + HVecMin* v0 = new (GetAllocator()) HVecMin( + GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, kNoDexPc); + HVecMin* v1 = new (GetAllocator()) 
HVecMin( + GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, kNoDexPc); + HVecMin* v2 = new (GetAllocator()) HVecMin( + GetAllocator(), p0, p0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, kNoDexPc); + HVecMin* v3 = new (GetAllocator()) HVecMin( + GetAllocator(), p1, p1, DataType::Type::kUint8, 16, /*is_unsigned*/ false, kNoDexPc); + HVecMin* v4 = new (GetAllocator()) HVecMin( + GetAllocator(), p1, p1, DataType::Type::kInt8, 16, /*is_unsigned*/ false, kNoDexPc); + HVecMin* v5 = new (GetAllocator()) HVecMin( + GetAllocator(), p2, p2, DataType::Type::kUint16, 8, /*is_unsigned*/ false, kNoDexPc); + HVecMin* v6 = new (GetAllocator()) HVecMin( + GetAllocator(), p2, p2, DataType::Type::kInt16, 8, /*is_unsigned*/ false, kNoDexPc); HVecMin* min_insns[] = { v0, v1, v2, v3, v4, v5, v6 }; EXPECT_FALSE(p0->CanBeMoved()); @@ -331,27 +327,27 @@ TEST_F(NodesVectorTest, VectorSignMattersOnMin) { } TEST_F(NodesVectorTest, VectorSignMattersOnMax) { - HVecOperation* p0 = new (&allocator_) - HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc); - HVecOperation* p1 = new (&allocator_) - HVecReplicateScalar(&allocator_, int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc); - HVecOperation* p2 = new (&allocator_) - HVecReplicateScalar(&allocator_, int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc); - - HVecMax* v0 = new (&allocator_) HVecMax( - &allocator_, p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, kNoDexPc); - HVecMax* v1 = new (&allocator_) HVecMax( - &allocator_, p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, kNoDexPc); - HVecMax* v2 = new (&allocator_) HVecMax( - &allocator_, p0, p0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, kNoDexPc); - HVecMax* v3 = new (&allocator_) HVecMax( - &allocator_, p1, p1, DataType::Type::kUint8, 16, /*is_unsigned*/ false, kNoDexPc); - HVecMax* v4 = new (&allocator_) HVecMax( - &allocator_, p1, p1, DataType::Type::kInt8, 16, /*is_unsigned*/ false, kNoDexPc); 
- HVecMax* v5 = new (&allocator_) HVecMax( - &allocator_, p2, p2, DataType::Type::kUint16, 8, /*is_unsigned*/ false, kNoDexPc); - HVecMax* v6 = new (&allocator_) HVecMax( - &allocator_, p2, p2, DataType::Type::kInt16, 8, /*is_unsigned*/ false, kNoDexPc); + HVecOperation* p0 = new (GetAllocator()) + HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc); + HVecOperation* p1 = new (GetAllocator()) + HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc); + HVecOperation* p2 = new (GetAllocator()) + HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc); + + HVecMax* v0 = new (GetAllocator()) HVecMax( + GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, kNoDexPc); + HVecMax* v1 = new (GetAllocator()) HVecMax( + GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, kNoDexPc); + HVecMax* v2 = new (GetAllocator()) HVecMax( + GetAllocator(), p0, p0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, kNoDexPc); + HVecMax* v3 = new (GetAllocator()) HVecMax( + GetAllocator(), p1, p1, DataType::Type::kUint8, 16, /*is_unsigned*/ false, kNoDexPc); + HVecMax* v4 = new (GetAllocator()) HVecMax( + GetAllocator(), p1, p1, DataType::Type::kInt8, 16, /*is_unsigned*/ false, kNoDexPc); + HVecMax* v5 = new (GetAllocator()) HVecMax( + GetAllocator(), p2, p2, DataType::Type::kUint16, 8, /*is_unsigned*/ false, kNoDexPc); + HVecMax* v6 = new (GetAllocator()) HVecMax( + GetAllocator(), p2, p2, DataType::Type::kInt16, 8, /*is_unsigned*/ false, kNoDexPc); HVecMax* max_insns[] = { v0, v1, v2, v3, v4, v5, v6 }; EXPECT_FALSE(p0->CanBeMoved()); @@ -375,51 +371,51 @@ TEST_F(NodesVectorTest, VectorSignMattersOnMax) { } TEST_F(NodesVectorTest, VectorAttributesMatterOnHalvingAdd) { - HVecOperation* p0 = new (&allocator_) - HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc); - HVecOperation* p1 = new (&allocator_) - 
HVecReplicateScalar(&allocator_, int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc); - HVecOperation* p2 = new (&allocator_) - HVecReplicateScalar(&allocator_, int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc); - - HVecHalvingAdd* v0 = new (&allocator_) HVecHalvingAdd( - &allocator_, p0, p0, DataType::Type::kInt32, 4, + HVecOperation* p0 = new (GetAllocator()) + HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc); + HVecOperation* p1 = new (GetAllocator()) + HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc); + HVecOperation* p2 = new (GetAllocator()) + HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc); + + HVecHalvingAdd* v0 = new (GetAllocator()) HVecHalvingAdd( + GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_rounded*/ true, /*is_unsigned*/ true, kNoDexPc); - HVecHalvingAdd* v1 = new (&allocator_) HVecHalvingAdd( - &allocator_, p0, p0, DataType::Type::kInt32, 4, + HVecHalvingAdd* v1 = new (GetAllocator()) HVecHalvingAdd( + GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_rounded*/ false, /*is_unsigned*/ true, kNoDexPc); - HVecHalvingAdd* v2 = new (&allocator_) HVecHalvingAdd( - &allocator_, p0, p0, DataType::Type::kInt32, 4, + HVecHalvingAdd* v2 = new (GetAllocator()) HVecHalvingAdd( + GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc); - HVecHalvingAdd* v3 = new (&allocator_) HVecHalvingAdd( - &allocator_, p0, p0, DataType::Type::kInt32, 4, + HVecHalvingAdd* v3 = new (GetAllocator()) HVecHalvingAdd( + GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc); - HVecHalvingAdd* v4 = new (&allocator_) HVecHalvingAdd( - &allocator_, p0, p0, DataType::Type::kInt32, 2, + HVecHalvingAdd* v4 = new (GetAllocator()) HVecHalvingAdd( + GetAllocator(), p0, p0, DataType::Type::kInt32, 2, /*is_rounded*/ true, /*is_unsigned*/ true, 
kNoDexPc); - HVecHalvingAdd* v5 = new (&allocator_) HVecHalvingAdd( - &allocator_, p1, p1, DataType::Type::kUint8, 16, + HVecHalvingAdd* v5 = new (GetAllocator()) HVecHalvingAdd( + GetAllocator(), p1, p1, DataType::Type::kUint8, 16, /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc); - HVecHalvingAdd* v6 = new (&allocator_) HVecHalvingAdd( - &allocator_, p1, p1, DataType::Type::kUint8, 16, + HVecHalvingAdd* v6 = new (GetAllocator()) HVecHalvingAdd( + GetAllocator(), p1, p1, DataType::Type::kUint8, 16, /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc); - HVecHalvingAdd* v7 = new (&allocator_) HVecHalvingAdd( - &allocator_, p1, p1, DataType::Type::kInt8, 16, + HVecHalvingAdd* v7 = new (GetAllocator()) HVecHalvingAdd( + GetAllocator(), p1, p1, DataType::Type::kInt8, 16, /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc); - HVecHalvingAdd* v8 = new (&allocator_) HVecHalvingAdd( - &allocator_, p1, p1, DataType::Type::kInt8, 16, + HVecHalvingAdd* v8 = new (GetAllocator()) HVecHalvingAdd( + GetAllocator(), p1, p1, DataType::Type::kInt8, 16, /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc); - HVecHalvingAdd* v9 = new (&allocator_) HVecHalvingAdd( - &allocator_, p2, p2, DataType::Type::kUint16, 8, + HVecHalvingAdd* v9 = new (GetAllocator()) HVecHalvingAdd( + GetAllocator(), p2, p2, DataType::Type::kUint16, 8, /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc); - HVecHalvingAdd* v10 = new (&allocator_) HVecHalvingAdd( - &allocator_, p2, p2, DataType::Type::kUint16, 8, + HVecHalvingAdd* v10 = new (GetAllocator()) HVecHalvingAdd( + GetAllocator(), p2, p2, DataType::Type::kUint16, 8, /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc); - HVecHalvingAdd* v11 = new (&allocator_) HVecHalvingAdd( - &allocator_, p2, p2, DataType::Type::kInt16, 2, + HVecHalvingAdd* v11 = new (GetAllocator()) HVecHalvingAdd( + GetAllocator(), p2, p2, DataType::Type::kInt16, 2, /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc); - HVecHalvingAdd* v12 = new (&allocator_) 
HVecHalvingAdd( - &allocator_, p2, p2, DataType::Type::kInt16, 2, + HVecHalvingAdd* v12 = new (GetAllocator()) HVecHalvingAdd( + GetAllocator(), p2, p2, DataType::Type::kInt16, 2, /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc); HVecHalvingAdd* hadd_insns[] = { v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12 }; @@ -460,15 +456,15 @@ TEST_F(NodesVectorTest, VectorAttributesMatterOnHalvingAdd) { } TEST_F(NodesVectorTest, VectorOperationMattersOnMultiplyAccumulate) { - HVecOperation* v0 = new (&allocator_) - HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc); + HVecOperation* v0 = new (GetAllocator()) + HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc); - HVecMultiplyAccumulate* v1 = new (&allocator_) HVecMultiplyAccumulate( - &allocator_, HInstruction::kAdd, v0, v0, v0, DataType::Type::kInt32, 4, kNoDexPc); - HVecMultiplyAccumulate* v2 = new (&allocator_) HVecMultiplyAccumulate( - &allocator_, HInstruction::kSub, v0, v0, v0, DataType::Type::kInt32, 4, kNoDexPc); - HVecMultiplyAccumulate* v3 = new (&allocator_) HVecMultiplyAccumulate( - &allocator_, HInstruction::kAdd, v0, v0, v0, DataType::Type::kInt32, 2, kNoDexPc); + HVecMultiplyAccumulate* v1 = new (GetAllocator()) HVecMultiplyAccumulate( + GetAllocator(), HInstruction::kAdd, v0, v0, v0, DataType::Type::kInt32, 4, kNoDexPc); + HVecMultiplyAccumulate* v2 = new (GetAllocator()) HVecMultiplyAccumulate( + GetAllocator(), HInstruction::kSub, v0, v0, v0, DataType::Type::kInt32, 4, kNoDexPc); + HVecMultiplyAccumulate* v3 = new (GetAllocator()) HVecMultiplyAccumulate( + GetAllocator(), HInstruction::kAdd, v0, v0, v0, DataType::Type::kInt32, 2, kNoDexPc); EXPECT_FALSE(v0->CanBeMoved()); EXPECT_TRUE(v1->CanBeMoved()); @@ -488,15 +484,15 @@ TEST_F(NodesVectorTest, VectorOperationMattersOnMultiplyAccumulate) { } TEST_F(NodesVectorTest, VectorKindMattersOnReduce) { - HVecOperation* v0 = new (&allocator_) - 
HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc); - - HVecReduce* v1 = new (&allocator_) HVecReduce( - &allocator_, v0, DataType::Type::kInt32, 4, HVecReduce::kSum, kNoDexPc); - HVecReduce* v2 = new (&allocator_) HVecReduce( - &allocator_, v0, DataType::Type::kInt32, 4, HVecReduce::kMin, kNoDexPc); - HVecReduce* v3 = new (&allocator_) HVecReduce( - &allocator_, v0, DataType::Type::kInt32, 4, HVecReduce::kMax, kNoDexPc); + HVecOperation* v0 = new (GetAllocator()) + HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc); + + HVecReduce* v1 = new (GetAllocator()) HVecReduce( + GetAllocator(), v0, DataType::Type::kInt32, 4, HVecReduce::kSum, kNoDexPc); + HVecReduce* v2 = new (GetAllocator()) HVecReduce( + GetAllocator(), v0, DataType::Type::kInt32, 4, HVecReduce::kMin, kNoDexPc); + HVecReduce* v3 = new (GetAllocator()) HVecReduce( + GetAllocator(), v0, DataType::Type::kInt32, 4, HVecReduce::kMax, kNoDexPc); EXPECT_FALSE(v0->CanBeMoved()); EXPECT_TRUE(v1->CanBeMoved()); diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc index 99d5284714..bd65cbf25e 100644 --- a/compiler/optimizing/optimizing_cfi_test.cc +++ b/compiler/optimizing/optimizing_cfi_test.cc @@ -46,19 +46,20 @@ class OptimizingCFITest : public CFITest { static constexpr bool kGenerateExpected = false; OptimizingCFITest() - : pool_(), - allocator_(&pool_), + : pool_and_allocator_(), opts_(), isa_features_(), graph_(nullptr), code_gen_(), - blocks_(allocator_.Adapter()) {} + blocks_(GetAllocator()->Adapter()) {} + + ArenaAllocator* GetAllocator() { return pool_and_allocator_.GetAllocator(); } void SetUpFrame(InstructionSet isa) { // Setup simple context. std::string error; isa_features_ = InstructionSetFeatures::FromVariant(isa, "default", &error); - graph_ = CreateGraph(&allocator_); + graph_ = CreateGraph(&pool_and_allocator_); // Generate simple frame with some spills. 
code_gen_ = CodeGenerator::Create(graph_, isa, *isa_features_, opts_); code_gen_->GetAssembler()->cfi().SetEnabled(true); @@ -142,8 +143,7 @@ class OptimizingCFITest : public CFITest { DISALLOW_COPY_AND_ASSIGN(InternalCodeAllocator); }; - ArenaPool pool_; - ArenaAllocator allocator_; + ArenaPoolAndAllocator pool_and_allocator_; CompilerOptions opts_; std::unique_ptr<const InstructionSetFeatures> isa_features_; HGraph* graph_; diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index 1e06ea86a2..9bfb7a5c50 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -45,6 +45,7 @@ #include "base/dumpable.h" #include "base/macros.h" #include "base/mutex.h" +#include "base/scoped_arena_allocator.h" #include "base/timing_logger.h" #include "bounds_check_elimination.h" #include "builder.h" @@ -108,8 +109,8 @@ static constexpr const char* kPassNameSeparator = "$"; */ class CodeVectorAllocator FINAL : public CodeAllocator { public: - explicit CodeVectorAllocator(ArenaAllocator* arena) - : memory_(arena->Adapter(kArenaAllocCodeBuffer)), + explicit CodeVectorAllocator(ArenaAllocator* allocator) + : memory_(allocator->Adapter(kArenaAllocCodeBuffer)), size_(0) {} virtual uint8_t* Allocate(size_t size) { @@ -148,7 +149,7 @@ class PassObserver : public ValueObject { cached_method_name_(), timing_logger_enabled_(compiler_driver->GetDumpPasses()), timing_logger_(timing_logger_enabled_ ? GetMethodName() : "", true, true), - disasm_info_(graph->GetArena()), + disasm_info_(graph->GetAllocator()), visualizer_oss_(), visualizer_output_(visualizer_output), visualizer_enabled_(!compiler_driver->GetCompilerOptions().GetDumpCfgFileName().empty()), @@ -351,7 +352,7 @@ class OptimizingCompiler FINAL : public Compiler { private: // Create a 'CompiledMethod' for an optimized graph. 
- CompiledMethod* Emit(ArenaAllocator* arena, + CompiledMethod* Emit(ArenaAllocator* allocator, CodeVectorAllocator* code_allocator, CodeGenerator* codegen, CompilerDriver* driver, @@ -364,7 +365,8 @@ class OptimizingCompiler FINAL : public Compiler { // 2) Transforms the graph to SSA. Returns null if it failed. // 3) Runs optimizations on the graph, including register allocator. // 4) Generates code with the `code_allocator` provided. - CodeGenerator* TryCompile(ArenaAllocator* arena, + CodeGenerator* TryCompile(ArenaAllocator* allocator, + ArenaStack* arena_stack, CodeVectorAllocator* code_allocator, const DexFile::CodeItem* code_item, uint32_t access_flags, @@ -452,7 +454,7 @@ static std::string ConvertPassNameToOptimizationName(const std::string& pass_nam static HOptimization* BuildOptimization( const std::string& pass_name, - ArenaAllocator* arena, + ArenaAllocator* allocator, HGraph* graph, OptimizingCompilerStats* stats, CodeGenerator* codegen, @@ -465,78 +467,79 @@ static HOptimization* BuildOptimization( std::string opt_name = ConvertPassNameToOptimizationName(pass_name); if (opt_name == BoundsCheckElimination::kBoundsCheckEliminationPassName) { CHECK(most_recent_side_effects != nullptr && most_recent_induction != nullptr); - return new (arena) BoundsCheckElimination(graph, - *most_recent_side_effects, - most_recent_induction); + return new (allocator) BoundsCheckElimination(graph, + *most_recent_side_effects, + most_recent_induction); } else if (opt_name == GVNOptimization::kGlobalValueNumberingPassName) { CHECK(most_recent_side_effects != nullptr); - return new (arena) GVNOptimization(graph, *most_recent_side_effects, pass_name.c_str()); + return new (allocator) GVNOptimization(graph, *most_recent_side_effects, pass_name.c_str()); } else if (opt_name == HConstantFolding::kConstantFoldingPassName) { - return new (arena) HConstantFolding(graph, pass_name.c_str()); + return new (allocator) HConstantFolding(graph, pass_name.c_str()); } else if (opt_name == 
HDeadCodeElimination::kDeadCodeEliminationPassName) { - return new (arena) HDeadCodeElimination(graph, stats, pass_name.c_str()); + return new (allocator) HDeadCodeElimination(graph, stats, pass_name.c_str()); } else if (opt_name == HInliner::kInlinerPassName) { size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_; - return new (arena) HInliner(graph, // outer_graph - graph, // outermost_graph - codegen, - dex_compilation_unit, // outer_compilation_unit - dex_compilation_unit, // outermost_compilation_unit - driver, - handles, - stats, - number_of_dex_registers, - /* total_number_of_instructions */ 0, - /* parent */ nullptr); + return new (allocator) HInliner(graph, // outer_graph + graph, // outermost_graph + codegen, + dex_compilation_unit, // outer_compilation_unit + dex_compilation_unit, // outermost_compilation_unit + driver, + handles, + stats, + number_of_dex_registers, + /* total_number_of_instructions */ 0, + /* parent */ nullptr); } else if (opt_name == HSharpening::kSharpeningPassName) { - return new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver, handles); + return new (allocator) HSharpening(graph, codegen, dex_compilation_unit, driver, handles); } else if (opt_name == HSelectGenerator::kSelectGeneratorPassName) { - return new (arena) HSelectGenerator(graph, handles, stats); + return new (allocator) HSelectGenerator(graph, handles, stats); } else if (opt_name == HInductionVarAnalysis::kInductionPassName) { - return new (arena) HInductionVarAnalysis(graph); + return new (allocator) HInductionVarAnalysis(graph); } else if (opt_name == InstructionSimplifier::kInstructionSimplifierPassName) { - return new (arena) InstructionSimplifier(graph, codegen, driver, stats, pass_name.c_str()); + return new (allocator) InstructionSimplifier(graph, codegen, driver, stats, pass_name.c_str()); } else if (opt_name == IntrinsicsRecognizer::kIntrinsicsRecognizerPassName) { - return new (arena) IntrinsicsRecognizer(graph, 
stats); + return new (allocator) IntrinsicsRecognizer(graph, stats); } else if (opt_name == LICM::kLoopInvariantCodeMotionPassName) { CHECK(most_recent_side_effects != nullptr); - return new (arena) LICM(graph, *most_recent_side_effects, stats); + return new (allocator) LICM(graph, *most_recent_side_effects, stats); } else if (opt_name == LoadStoreAnalysis::kLoadStoreAnalysisPassName) { - return new (arena) LoadStoreAnalysis(graph); + return new (allocator) LoadStoreAnalysis(graph); } else if (opt_name == LoadStoreElimination::kLoadStoreEliminationPassName) { CHECK(most_recent_side_effects != nullptr); CHECK(most_recent_lsa != nullptr); - return - new (arena) LoadStoreElimination(graph, *most_recent_side_effects, *most_recent_lsa, stats); + return new (allocator) LoadStoreElimination(graph, + *most_recent_side_effects, + *most_recent_lsa, stats); } else if (opt_name == SideEffectsAnalysis::kSideEffectsAnalysisPassName) { - return new (arena) SideEffectsAnalysis(graph); + return new (allocator) SideEffectsAnalysis(graph); } else if (opt_name == HLoopOptimization::kLoopOptimizationPassName) { - return new (arena) HLoopOptimization(graph, driver, most_recent_induction, stats); + return new (allocator) HLoopOptimization(graph, driver, most_recent_induction, stats); } else if (opt_name == CHAGuardOptimization::kCHAGuardOptimizationPassName) { - return new (arena) CHAGuardOptimization(graph); + return new (allocator) CHAGuardOptimization(graph); } else if (opt_name == CodeSinking::kCodeSinkingPassName) { - return new (arena) CodeSinking(graph, stats); + return new (allocator) CodeSinking(graph, stats); } else if (opt_name == ConstructorFenceRedundancyElimination::kPassName) { - return new (arena) ConstructorFenceRedundancyElimination(graph, stats); + return new (allocator) ConstructorFenceRedundancyElimination(graph, stats); #ifdef ART_ENABLE_CODEGEN_arm } else if (opt_name == arm::InstructionSimplifierArm::kInstructionSimplifierArmPassName) { - return new (arena) 
arm::InstructionSimplifierArm(graph, stats); + return new (allocator) arm::InstructionSimplifierArm(graph, stats); #endif #ifdef ART_ENABLE_CODEGEN_arm64 } else if (opt_name == arm64::InstructionSimplifierArm64::kInstructionSimplifierArm64PassName) { - return new (arena) arm64::InstructionSimplifierArm64(graph, stats); + return new (allocator) arm64::InstructionSimplifierArm64(graph, stats); #endif #ifdef ART_ENABLE_CODEGEN_mips } else if (opt_name == mips::PcRelativeFixups::kPcRelativeFixupsMipsPassName) { - return new (arena) mips::PcRelativeFixups(graph, codegen, stats); + return new (allocator) mips::PcRelativeFixups(graph, codegen, stats); } else if (opt_name == mips::InstructionSimplifierMips::kInstructionSimplifierMipsPassName) { - return new (arena) mips::InstructionSimplifierMips(graph, codegen, stats); + return new (allocator) mips::InstructionSimplifierMips(graph, codegen, stats); #endif #ifdef ART_ENABLE_CODEGEN_x86 } else if (opt_name == x86::PcRelativeFixups::kPcRelativeFixupsX86PassName) { - return new (arena) x86::PcRelativeFixups(graph, codegen, stats); + return new (allocator) x86::PcRelativeFixups(graph, codegen, stats); } else if (opt_name == x86::X86MemoryOperandGeneration::kX86MemoryOperandGenerationPassName) { - return new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats); + return new (allocator) x86::X86MemoryOperandGeneration(graph, codegen, stats); #endif } return nullptr; @@ -544,7 +547,7 @@ static HOptimization* BuildOptimization( static ArenaVector<HOptimization*> BuildOptimizations( const std::vector<std::string>& pass_names, - ArenaAllocator* arena, + ArenaAllocator* allocator, HGraph* graph, OptimizingCompilerStats* stats, CodeGenerator* codegen, @@ -557,11 +560,11 @@ static ArenaVector<HOptimization*> BuildOptimizations( SideEffectsAnalysis* most_recent_side_effects = nullptr; HInductionVarAnalysis* most_recent_induction = nullptr; LoadStoreAnalysis* most_recent_lsa = nullptr; - ArenaVector<HOptimization*> 
ret(arena->Adapter()); + ArenaVector<HOptimization*> ret(allocator->Adapter()); for (const std::string& pass_name : pass_names) { HOptimization* opt = BuildOptimization( pass_name, - arena, + allocator, graph, stats, codegen, @@ -608,7 +611,7 @@ void OptimizingCompiler::MaybeRunInliner(HGraph* graph, return; } size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_; - HInliner* inliner = new (graph->GetArena()) HInliner( + HInliner* inliner = new (graph->GetAllocator()) HInliner( graph, // outer_graph graph, // outermost_graph codegen, @@ -631,17 +634,18 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set, PassObserver* pass_observer) const { UNUSED(codegen); // To avoid compilation error when compiling for svelte OptimizingCompilerStats* stats = compilation_stats_.get(); - ArenaAllocator* arena = graph->GetArena(); + ArenaAllocator* allocator = graph->GetAllocator(); switch (instruction_set) { #if defined(ART_ENABLE_CODEGEN_arm) case kThumb2: case kArm: { arm::InstructionSimplifierArm* simplifier = - new (arena) arm::InstructionSimplifierArm(graph, stats); - SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph); - GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch"); + new (allocator) arm::InstructionSimplifierArm(graph, stats); + SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph); + GVNOptimization* gvn = + new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch"); HInstructionScheduling* scheduling = - new (arena) HInstructionScheduling(graph, instruction_set, codegen); + new (allocator) HInstructionScheduling(graph, instruction_set, codegen); HOptimization* arm_optimizations[] = { simplifier, side_effects, @@ -655,11 +659,12 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set, #ifdef ART_ENABLE_CODEGEN_arm64 case kArm64: { arm64::InstructionSimplifierArm64* simplifier = - new 
(arena) arm64::InstructionSimplifierArm64(graph, stats); - SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph); - GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch"); + new (allocator) arm64::InstructionSimplifierArm64(graph, stats); + SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph); + GVNOptimization* gvn = + new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch"); HInstructionScheduling* scheduling = - new (arena) HInstructionScheduling(graph, instruction_set); + new (allocator) HInstructionScheduling(graph, instruction_set); HOptimization* arm64_optimizations[] = { simplifier, side_effects, @@ -673,11 +678,12 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set, #ifdef ART_ENABLE_CODEGEN_mips case kMips: { mips::InstructionSimplifierMips* simplifier = - new (arena) mips::InstructionSimplifierMips(graph, codegen, stats); - SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph); - GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch"); + new (allocator) mips::InstructionSimplifierMips(graph, codegen, stats); + SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph); + GVNOptimization* gvn = + new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch"); mips::PcRelativeFixups* pc_relative_fixups = - new (arena) mips::PcRelativeFixups(graph, codegen, stats); + new (allocator) mips::PcRelativeFixups(graph, codegen, stats); HOptimization* mips_optimizations[] = { simplifier, side_effects, @@ -690,8 +696,9 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set, #endif #ifdef ART_ENABLE_CODEGEN_mips64 case kMips64: { - SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph); - GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch"); + SideEffectsAnalysis* side_effects = 
new (allocator) SideEffectsAnalysis(graph); + GVNOptimization* gvn = + new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch"); HOptimization* mips64_optimizations[] = { side_effects, gvn, @@ -702,12 +709,13 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set, #endif #ifdef ART_ENABLE_CODEGEN_x86 case kX86: { - SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph); - GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch"); + SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph); + GVNOptimization* gvn = + new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch"); x86::PcRelativeFixups* pc_relative_fixups = - new (arena) x86::PcRelativeFixups(graph, codegen, stats); + new (allocator) x86::PcRelativeFixups(graph, codegen, stats); x86::X86MemoryOperandGeneration* memory_gen = - new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats); + new (allocator) x86::X86MemoryOperandGeneration(graph, codegen, stats); HOptimization* x86_optimizations[] = { side_effects, gvn, @@ -720,10 +728,11 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set, #endif #ifdef ART_ENABLE_CODEGEN_x86_64 case kX86_64: { - SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph); - GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch"); + SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph); + GVNOptimization* gvn = + new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch"); x86::X86MemoryOperandGeneration* memory_gen = - new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats); + new (allocator) x86::X86MemoryOperandGeneration(graph, codegen, stats); HOptimization* x86_64_optimizations[] = { side_effects, gvn, @@ -749,14 +758,19 @@ static void AllocateRegisters(HGraph* graph, pass_observer); 
PrepareForRegisterAllocation(graph, stats).Run(); } - SsaLivenessAnalysis liveness(graph, codegen); + // Use local allocator shared by SSA liveness analysis and register allocator. + // (Register allocator creates new objects in the liveness data.) + ScopedArenaAllocator local_allocator(graph->GetArenaStack()); + SsaLivenessAnalysis liveness(graph, codegen, &local_allocator); { PassScope scope(SsaLivenessAnalysis::kLivenessPassName, pass_observer); liveness.Analyze(); } { PassScope scope(RegisterAllocator::kRegisterAllocatorPassName, pass_observer); - RegisterAllocator::Create(graph->GetArena(), codegen, liveness, strategy)->AllocateRegisters(); + std::unique_ptr<RegisterAllocator> register_allocator = + RegisterAllocator::Create(&local_allocator, codegen, liveness, strategy); + register_allocator->AllocateRegisters(); } } @@ -767,11 +781,11 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph, PassObserver* pass_observer, VariableSizedHandleScope* handles) const { OptimizingCompilerStats* stats = compilation_stats_.get(); - ArenaAllocator* arena = graph->GetArena(); + ArenaAllocator* allocator = graph->GetAllocator(); if (driver->GetCompilerOptions().GetPassesToRun() != nullptr) { ArenaVector<HOptimization*> optimizations = BuildOptimizations( *driver->GetCompilerOptions().GetPassesToRun(), - arena, + allocator, graph, stats, codegen, @@ -782,43 +796,45 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph, return; } - HDeadCodeElimination* dce1 = new (arena) HDeadCodeElimination( + HDeadCodeElimination* dce1 = new (allocator) HDeadCodeElimination( graph, stats, "dead_code_elimination$initial"); - HDeadCodeElimination* dce2 = new (arena) HDeadCodeElimination( + HDeadCodeElimination* dce2 = new (allocator) HDeadCodeElimination( graph, stats, "dead_code_elimination$after_inlining"); - HDeadCodeElimination* dce3 = new (arena) HDeadCodeElimination( + HDeadCodeElimination* dce3 = new (allocator) HDeadCodeElimination( graph, stats, 
"dead_code_elimination$final"); - HConstantFolding* fold1 = new (arena) HConstantFolding(graph, "constant_folding"); - InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier( + HConstantFolding* fold1 = new (allocator) HConstantFolding(graph, "constant_folding"); + InstructionSimplifier* simplify1 = new (allocator) InstructionSimplifier( graph, codegen, driver, stats); - HSelectGenerator* select_generator = new (arena) HSelectGenerator(graph, handles, stats); - HConstantFolding* fold2 = new (arena) HConstantFolding( + HSelectGenerator* select_generator = new (allocator) HSelectGenerator(graph, handles, stats); + HConstantFolding* fold2 = new (allocator) HConstantFolding( graph, "constant_folding$after_inlining"); - HConstantFolding* fold3 = new (arena) HConstantFolding(graph, "constant_folding$after_bce"); - SideEffectsAnalysis* side_effects1 = new (arena) SideEffectsAnalysis( + HConstantFolding* fold3 = new (allocator) HConstantFolding(graph, "constant_folding$after_bce"); + SideEffectsAnalysis* side_effects1 = new (allocator) SideEffectsAnalysis( graph, "side_effects$before_gvn"); - SideEffectsAnalysis* side_effects2 = new (arena) SideEffectsAnalysis( + SideEffectsAnalysis* side_effects2 = new (allocator) SideEffectsAnalysis( graph, "side_effects$before_lse"); - GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects1); - LICM* licm = new (arena) LICM(graph, *side_effects1, stats); - HInductionVarAnalysis* induction = new (arena) HInductionVarAnalysis(graph); - BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, *side_effects1, induction); - HLoopOptimization* loop = new (arena) HLoopOptimization(graph, driver, induction, stats); - LoadStoreAnalysis* lsa = new (arena) LoadStoreAnalysis(graph); - LoadStoreElimination* lse = new (arena) LoadStoreElimination(graph, *side_effects2, *lsa, stats); - HSharpening* sharpening = new (arena) HSharpening( + GVNOptimization* gvn = new (allocator) GVNOptimization(graph, 
*side_effects1); + LICM* licm = new (allocator) LICM(graph, *side_effects1, stats); + HInductionVarAnalysis* induction = new (allocator) HInductionVarAnalysis(graph); + BoundsCheckElimination* bce = + new (allocator) BoundsCheckElimination(graph, *side_effects1, induction); + HLoopOptimization* loop = new (allocator) HLoopOptimization(graph, driver, induction, stats); + LoadStoreAnalysis* lsa = new (allocator) LoadStoreAnalysis(graph); + LoadStoreElimination* lse = + new (allocator) LoadStoreElimination(graph, *side_effects2, *lsa, stats); + HSharpening* sharpening = new (allocator) HSharpening( graph, codegen, dex_compilation_unit, driver, handles); - InstructionSimplifier* simplify2 = new (arena) InstructionSimplifier( + InstructionSimplifier* simplify2 = new (allocator) InstructionSimplifier( graph, codegen, driver, stats, "instruction_simplifier$after_inlining"); - InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier( + InstructionSimplifier* simplify3 = new (allocator) InstructionSimplifier( graph, codegen, driver, stats, "instruction_simplifier$after_bce"); - InstructionSimplifier* simplify4 = new (arena) InstructionSimplifier( + InstructionSimplifier* simplify4 = new (allocator) InstructionSimplifier( graph, codegen, driver, stats, "instruction_simplifier$before_codegen"); - IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, stats); - CHAGuardOptimization* cha_guard = new (arena) CHAGuardOptimization(graph); - CodeSinking* code_sinking = new (arena) CodeSinking(graph, stats); + IntrinsicsRecognizer* intrinsics = new (allocator) IntrinsicsRecognizer(graph, stats); + CHAGuardOptimization* cha_guard = new (allocator) CHAGuardOptimization(graph); + CodeSinking* code_sinking = new (allocator) CodeSinking(graph, stats); ConstructorFenceRedundancyElimination* cfre = - new (arena) ConstructorFenceRedundancyElimination(graph, stats); + new (allocator) ConstructorFenceRedundancyElimination(graph, stats); HOptimization* 
optimizations1[] = { intrinsics, @@ -865,7 +881,7 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph, } static ArenaVector<linker::LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator* codegen) { - ArenaVector<linker::LinkerPatch> linker_patches(codegen->GetGraph()->GetArena()->Adapter()); + ArenaVector<linker::LinkerPatch> linker_patches(codegen->GetGraph()->GetAllocator()->Adapter()); codegen->EmitLinkerPatches(&linker_patches); // Sort patches by literal offset. Required for .oat_patches encoding. @@ -877,14 +893,14 @@ static ArenaVector<linker::LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator* return linker_patches; } -CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* arena, +CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator, CodeVectorAllocator* code_allocator, CodeGenerator* codegen, CompilerDriver* compiler_driver, const DexFile::CodeItem* code_item) const { ArenaVector<linker::LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen); - ArenaVector<uint8_t> stack_map(arena->Adapter(kArenaAllocStackMaps)); - ArenaVector<uint8_t> method_info(arena->Adapter(kArenaAllocStackMaps)); + ArenaVector<uint8_t> stack_map(allocator->Adapter(kArenaAllocStackMaps)); + ArenaVector<uint8_t> method_info(allocator->Adapter(kArenaAllocStackMaps)); size_t stack_map_size = 0; size_t method_info_size = 0; codegen->ComputeStackMapAndMethodInfoSize(&stack_map_size, &method_info_size); @@ -912,7 +928,8 @@ CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* arena, return compiled_method; } -CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena, +CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator, + ArenaStack* arena_stack, CodeVectorAllocator* code_allocator, const DexFile::CodeItem* code_item, uint32_t access_flags, @@ -970,8 +987,9 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena, /* verified_method */ nullptr, dex_cache); - HGraph* graph = new (arena) HGraph( - arena, + HGraph* 
graph = new (allocator) HGraph( + allocator, + arena_stack, dex_file, method_idx, compiler_driver->GetInstructionSet(), @@ -1024,7 +1042,6 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena, codegen.get(), compilation_stats_.get(), interpreter_metadata, - dex_cache, handles); GraphAnalysisResult result = builder.BuildGraph(); if (result != kAnalysisSuccess) { @@ -1091,11 +1108,12 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item, DCHECK(Runtime::Current()->IsAotCompiler()); const VerifiedMethod* verified_method = compiler_driver->GetVerifiedMethod(&dex_file, method_idx); DCHECK(!verified_method->HasRuntimeThrow()); - if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file) - || verifier::CanCompilerHandleVerificationFailure( - verified_method->GetEncounteredVerificationFailures())) { - ArenaAllocator arena(Runtime::Current()->GetArenaPool()); - CodeVectorAllocator code_allocator(&arena); + if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file) || + verifier::CanCompilerHandleVerificationFailure( + verified_method->GetEncounteredVerificationFailures())) { + ArenaAllocator allocator(Runtime::Current()->GetArenaPool()); + ArenaStack arena_stack(Runtime::Current()->GetArenaPool()); + CodeVectorAllocator code_allocator(&allocator); std::unique_ptr<CodeGenerator> codegen; { ScopedObjectAccess soa(Thread::Current()); @@ -1103,7 +1121,8 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item, // Go to native so that we don't block GC during compilation. 
ScopedThreadSuspension sts(soa.Self(), kNative); codegen.reset( - TryCompile(&arena, + TryCompile(&allocator, + &arena_stack, &code_allocator, code_item, access_flags, @@ -1120,12 +1139,16 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item, if (codegen.get() != nullptr) { MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiled); - method = Emit(&arena, &code_allocator, codegen.get(), compiler_driver, code_item); + method = Emit(&allocator, &code_allocator, codegen.get(), compiler_driver, code_item); if (kArenaAllocatorCountAllocations) { - if (arena.BytesAllocated() > kArenaAllocatorMemoryReportThreshold) { - MemStats mem_stats(arena.GetMemStats()); - LOG(INFO) << dex_file.PrettyMethod(method_idx) << " " << Dumpable<MemStats>(mem_stats); + size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated(); + if (total_allocated > kArenaAllocatorMemoryReportThreshold) { + MemStats mem_stats(allocator.GetMemStats()); + MemStats peak_stats(arena_stack.GetPeakStats()); + LOG(INFO) << dex_file.PrettyMethod(method_idx) + << "\n" << Dumpable<MemStats>(mem_stats) + << "\n" << Dumpable<MemStats>(peak_stats); } } } @@ -1200,8 +1223,9 @@ bool OptimizingCompiler::JitCompile(Thread* self, const uint32_t access_flags = method->GetAccessFlags(); const InvokeType invoke_type = method->GetInvokeType(); - ArenaAllocator arena(Runtime::Current()->GetJitArenaPool()); - CodeVectorAllocator code_allocator(&arena); + ArenaAllocator allocator(Runtime::Current()->GetJitArenaPool()); + ArenaStack arena_stack(Runtime::Current()->GetJitArenaPool()); + CodeVectorAllocator code_allocator(&allocator); VariableSizedHandleScope handles(self); std::unique_ptr<CodeGenerator> codegen; @@ -1209,7 +1233,8 @@ bool OptimizingCompiler::JitCompile(Thread* self, // Go to native so that we don't block GC during compilation. 
ScopedThreadSuspension sts(self, kNative); codegen.reset( - TryCompile(&arena, + TryCompile(&allocator, + &arena_stack, &code_allocator, code_item, access_flags, @@ -1227,9 +1252,13 @@ bool OptimizingCompiler::JitCompile(Thread* self, } if (kArenaAllocatorCountAllocations) { - if (arena.BytesAllocated() > kArenaAllocatorMemoryReportThreshold) { - MemStats mem_stats(arena.GetMemStats()); - LOG(INFO) << dex_file->PrettyMethod(method_idx) << " " << Dumpable<MemStats>(mem_stats); + size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated(); + if (total_allocated > kArenaAllocatorMemoryReportThreshold) { + MemStats mem_stats(allocator.GetMemStats()); + MemStats peak_stats(arena_stack.GetPeakStats()); + LOG(INFO) << dex_file->PrettyMethod(method_idx) + << "\n" << Dumpable<MemStats>(mem_stats) + << "\n" << Dumpable<MemStats>(peak_stats); } } } @@ -1321,7 +1350,7 @@ bool OptimizingCompiler::JitCompile(Thread* self, CreateJITCodeEntryForAddress(code_address, std::move(elf_file)); } - Runtime::Current()->GetJit()->AddMemoryUsage(method, arena.BytesUsed()); + Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed()); if (jit_logger != nullptr) { jit_logger->WriteLog(code, code_allocator.GetSize(), method); } diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h index 33f1a4affe..5632f9a453 100644 --- a/compiler/optimizing/optimizing_unit_test.h +++ b/compiler/optimizing/optimizing_unit_test.h @@ -17,6 +17,7 @@ #ifndef ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_ #define ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_ +#include "base/scoped_arena_allocator.h" #include "builder.h" #include "common_compiler_test.h" #include "dex_file.h" @@ -47,7 +48,7 @@ namespace art { LiveInterval* BuildInterval(const size_t ranges[][2], size_t number_of_ranges, - ArenaAllocator* allocator, + ScopedArenaAllocator* allocator, int reg = -1, HInstruction* defined_by = nullptr) { LiveInterval* interval 
= @@ -78,30 +79,69 @@ void RemoveSuspendChecks(HGraph* graph) { } } -inline HGraph* CreateGraph(ArenaAllocator* allocator) { - return new (allocator) HGraph( - allocator, - *reinterpret_cast<DexFile*>(allocator->Alloc(sizeof(DexFile))), +class ArenaPoolAndAllocator { + public: + ArenaPoolAndAllocator() + : pool_(), allocator_(&pool_), arena_stack_(&pool_), scoped_allocator_(&arena_stack_) { } + + ArenaAllocator* GetAllocator() { return &allocator_; } + ArenaStack* GetArenaStack() { return &arena_stack_; } + ScopedArenaAllocator* GetScopedAllocator() { return &scoped_allocator_; } + + private: + ArenaPool pool_; + ArenaAllocator allocator_; + ArenaStack arena_stack_; + ScopedArenaAllocator scoped_allocator_; +}; + +inline HGraph* CreateGraph(ArenaPoolAndAllocator* pool_and_allocator) { + return new (pool_and_allocator->GetAllocator()) HGraph( + pool_and_allocator->GetAllocator(), + pool_and_allocator->GetArenaStack(), + *reinterpret_cast<DexFile*>(pool_and_allocator->GetAllocator()->Alloc(sizeof(DexFile))), /*method_idx*/-1, kRuntimeISA); } -// Create a control-flow graph from Dex instructions. -inline HGraph* CreateCFG(ArenaAllocator* allocator, - const uint16_t* data, - DataType::Type return_type = DataType::Type::kInt32) { - const DexFile::CodeItem* item = - reinterpret_cast<const DexFile::CodeItem*>(data); - HGraph* graph = CreateGraph(allocator); - - { - ScopedObjectAccess soa(Thread::Current()); - VariableSizedHandleScope handles(soa.Self()); - HGraphBuilder builder(graph, *item, &handles, return_type); - bool graph_built = (builder.BuildGraph() == kAnalysisSuccess); - return graph_built ? 
graph : nullptr; +class OptimizingUnitTest : public CommonCompilerTest { + protected: + OptimizingUnitTest() : pool_and_allocator_(new ArenaPoolAndAllocator()) { } + + ArenaAllocator* GetAllocator() { return pool_and_allocator_->GetAllocator(); } + ArenaStack* GetArenaStack() { return pool_and_allocator_->GetArenaStack(); } + ScopedArenaAllocator* GetScopedAllocator() { return pool_and_allocator_->GetScopedAllocator(); } + + void ResetPoolAndAllocator() { + pool_and_allocator_.reset(new ArenaPoolAndAllocator()); + handles_.reset(); // When getting rid of the old HGraph, we can also reset handles_. } -} + + HGraph* CreateGraph() { + return art::CreateGraph(pool_and_allocator_.get()); + } + + // Create a control-flow graph from Dex instructions. + HGraph* CreateCFG(const uint16_t* data, DataType::Type return_type = DataType::Type::kInt32) { + const DexFile::CodeItem* item = + reinterpret_cast<const DexFile::CodeItem*>(data); + HGraph* graph = CreateGraph(); + + { + ScopedObjectAccess soa(Thread::Current()); + if (handles_ == nullptr) { + handles_.reset(new VariableSizedHandleScope(soa.Self())); + } + HGraphBuilder builder(graph, *item, handles_.get(), return_type); + bool graph_built = (builder.BuildGraph() == kAnalysisSuccess); + return graph_built ? graph : nullptr; + } + } + + private: + std::unique_ptr<ArenaPoolAndAllocator> pool_and_allocator_; + std::unique_ptr<VariableSizedHandleScope> handles_; +}; // Naive string diff data type. typedef std::list<std::pair<std::string, std::string>> diff_t; diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc index e569b78c9d..9d5358514e 100644 --- a/compiler/optimizing/pc_relative_fixups_mips.cc +++ b/compiler/optimizing/pc_relative_fixups_mips.cc @@ -52,7 +52,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor { } // Insert the base at the start of the entry block, move it to a better // position later in MoveBaseIfNeeded(). 
- base_ = new (GetGraph()->GetArena()) HMipsComputeBaseMethodAddress(); + base_ = new (GetGraph()->GetAllocator()) HMipsComputeBaseMethodAddress(); HBasicBlock* entry_block = GetGraph()->GetEntryBlock(); entry_block->InsertInstructionBefore(base_, entry_block->GetFirstInstruction()); DCHECK(base_ != nullptr); @@ -112,7 +112,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor { InitializePCRelativeBasePointer(); HGraph* graph = GetGraph(); HBasicBlock* block = switch_insn->GetBlock(); - HMipsPackedSwitch* mips_switch = new (graph->GetArena()) HMipsPackedSwitch( + HMipsPackedSwitch* mips_switch = new (graph->GetAllocator()) HMipsPackedSwitch( switch_insn->GetStartValue(), switch_insn->GetNumEntries(), switch_insn->InputAt(0), diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc index a114e78eb4..f92f4b274a 100644 --- a/compiler/optimizing/pc_relative_fixups_x86.cc +++ b/compiler/optimizing/pc_relative_fixups_x86.cc @@ -137,7 +137,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor { HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(neg); HGraph* graph = GetGraph(); HBasicBlock* block = neg->GetBlock(); - HX86FPNeg* x86_fp_neg = new (graph->GetArena()) HX86FPNeg( + HX86FPNeg* x86_fp_neg = new (graph->GetAllocator()) HX86FPNeg( neg->GetType(), neg->InputAt(0), method_address, @@ -156,7 +156,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor { HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(switch_insn); HGraph* graph = GetGraph(); HBasicBlock* block = switch_insn->GetBlock(); - HX86PackedSwitch* x86_switch = new (graph->GetArena()) HX86PackedSwitch( + HX86PackedSwitch* x86_switch = new (graph->GetAllocator()) HX86PackedSwitch( switch_insn->GetStartValue(), switch_insn->GetNumEntries(), switch_insn->InputAt(0), @@ -176,7 +176,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor { // Insert the base at the start of the entry block, move it to a 
better // position later in MoveBaseIfNeeded(). HX86ComputeBaseMethodAddress* method_address = - new (GetGraph()->GetArena()) HX86ComputeBaseMethodAddress(); + new (GetGraph()->GetAllocator()) HX86ComputeBaseMethodAddress(); if (has_irreducible_loops) { cursor->GetBlock()->InsertInstructionBefore(method_address, cursor); } else { @@ -190,7 +190,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor { void ReplaceInput(HInstruction* insn, HConstant* value, int input_index, bool materialize) { HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(insn); HX86LoadFromConstantTable* load_constant = - new (GetGraph()->GetArena()) HX86LoadFromConstantTable(method_address, value); + new (GetGraph()->GetAllocator()) HX86LoadFromConstantTable(method_address, value); if (!materialize) { load_constant->MarkEmittedAtUseSite(); } diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc index b52de367d1..fe98aa9561 100644 --- a/compiler/optimizing/prepare_for_register_allocation.cc +++ b/compiler/optimizing/prepare_for_register_allocation.cc @@ -56,12 +56,12 @@ void PrepareForRegisterAllocation::VisitBoundsCheck(HBoundsCheck* check) { // Add a fake environment for String.charAt() inline info as we want // the exception to appear as being thrown from there. 
ArtMethod* char_at_method = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt); - ArenaAllocator* arena = GetGraph()->GetArena(); - HEnvironment* environment = new (arena) HEnvironment(arena, - /* number_of_vregs */ 0u, - char_at_method, - /* dex_pc */ dex::kDexNoIndex, - check); + ArenaAllocator* allocator = GetGraph()->GetAllocator(); + HEnvironment* environment = new (allocator) HEnvironment(allocator, + /* number_of_vregs */ 0u, + char_at_method, + /* dex_pc */ dex::kDexNoIndex, + check); check->InsertRawEnvironment(environment); } } diff --git a/compiler/optimizing/pretty_printer_test.cc b/compiler/optimizing/pretty_printer_test.cc index 14d2360392..4aec6d3999 100644 --- a/compiler/optimizing/pretty_printer_test.cc +++ b/compiler/optimizing/pretty_printer_test.cc @@ -27,17 +27,18 @@ namespace art { -static void TestCode(const uint16_t* data, const char* expected) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = CreateCFG(&allocator, data); +class PrettyPrinterTest : public OptimizingUnitTest { + protected: + void TestCode(const uint16_t* data, const char* expected); +}; + +void PrettyPrinterTest::TestCode(const uint16_t* data, const char* expected) { + HGraph* graph = CreateCFG(data); StringPrettyPrinter printer(graph); printer.VisitInsertionOrder(); ASSERT_STREQ(expected, printer.str().c_str()); } -class PrettyPrinterTest : public CommonCompilerTest {}; - TEST_F(PrettyPrinterTest, ReturnVoid) { const uint16_t data[] = ZERO_REGISTER_CODE_ITEM( Instruction::RETURN_VOID); diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc index f5064c3057..6d9ebc8d91 100644 --- a/compiler/optimizing/reference_type_propagation.cc +++ b/compiler/optimizing/reference_type_propagation.cc @@ -122,7 +122,7 @@ ReferenceTypePropagation::ReferenceTypePropagation(HGraph* graph, class_loader_(class_loader), hint_dex_cache_(hint_dex_cache), handle_cache_(handles), - 
worklist_(graph->GetArena()->Adapter(kArenaAllocReferenceTypePropagation)), + worklist_(graph->GetAllocator()->Adapter(kArenaAllocReferenceTypePropagation)), is_first_run_(is_first_run) { } @@ -235,7 +235,7 @@ static void BoundTypeIn(HInstruction* receiver, : start_block->GetFirstInstruction(); if (ShouldCreateBoundType( insert_point, receiver, class_rti, start_instruction, start_block)) { - bound_type = new (receiver->GetBlock()->GetGraph()->GetArena()) HBoundType(receiver); + bound_type = new (receiver->GetBlock()->GetGraph()->GetAllocator()) HBoundType(receiver); bound_type->SetUpperBound(class_rti, /* bound_can_be_null */ false); start_block->InsertInstructionBefore(bound_type, insert_point); // To comply with the RTP algorithm, don't type the bound type just yet, it will diff --git a/compiler/optimizing/reference_type_propagation_test.cc b/compiler/optimizing/reference_type_propagation_test.cc index cb2af91d87..028b6d3b79 100644 --- a/compiler/optimizing/reference_type_propagation_test.cc +++ b/compiler/optimizing/reference_type_propagation_test.cc @@ -28,22 +28,20 @@ namespace art { * Fixture class for unit testing the ReferenceTypePropagation phase. Used to verify the * functionality of methods and situations that are hard to set up with checker tests. 
*/ -class ReferenceTypePropagationTest : public CommonCompilerTest { +class ReferenceTypePropagationTest : public OptimizingUnitTest { public: - ReferenceTypePropagationTest() : pool_(), allocator_(&pool_), propagation_(nullptr) { - graph_ = CreateGraph(&allocator_); - } + ReferenceTypePropagationTest() : graph_(CreateGraph()), propagation_(nullptr) { } ~ReferenceTypePropagationTest() { } void SetupPropagation(VariableSizedHandleScope* handles) { graph_->InitializeInexactObjectRTI(handles); - propagation_ = new (&allocator_) ReferenceTypePropagation(graph_, - Handle<mirror::ClassLoader>(), - Handle<mirror::DexCache>(), - handles, - true, - "test_prop"); + propagation_ = new (GetAllocator()) ReferenceTypePropagation(graph_, + Handle<mirror::ClassLoader>(), + Handle<mirror::DexCache>(), + handles, + true, + "test_prop"); } // Relay method to merge type in reference type propagation. @@ -68,8 +66,6 @@ class ReferenceTypePropagationTest : public CommonCompilerTest { } // General building fields. 
- ArenaPool pool_; - ArenaAllocator allocator_; HGraph* graph_; ReferenceTypePropagation* propagation_; diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc index 1786aa72a1..5ed9e0243f 100644 --- a/compiler/optimizing/register_allocation_resolver.cc +++ b/compiler/optimizing/register_allocation_resolver.cc @@ -22,10 +22,9 @@ namespace art { -RegisterAllocationResolver::RegisterAllocationResolver(ArenaAllocator* allocator, - CodeGenerator* codegen, +RegisterAllocationResolver::RegisterAllocationResolver(CodeGenerator* codegen, const SsaLivenessAnalysis& liveness) - : allocator_(allocator), + : allocator_(codegen->GetGraph()->GetAllocator()), codegen_(codegen), liveness_(liveness) {} @@ -36,7 +35,7 @@ void RegisterAllocationResolver::Resolve(ArrayRef<HInstruction* const> safepoint size_t float_spill_slots, size_t double_spill_slots, size_t catch_phi_spill_slots, - const ArenaVector<LiveInterval*>& temp_intervals) { + ArrayRef<LiveInterval* const> temp_intervals) { size_t spill_slots = int_spill_slots + long_spill_slots + float_spill_slots diff --git a/compiler/optimizing/register_allocation_resolver.h b/compiler/optimizing/register_allocation_resolver.h index 4a148e0abf..278371777d 100644 --- a/compiler/optimizing/register_allocation_resolver.h +++ b/compiler/optimizing/register_allocation_resolver.h @@ -17,7 +17,6 @@ #ifndef ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATION_RESOLVER_H_ #define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATION_RESOLVER_H_ -#include "base/arena_containers.h" #include "base/array_ref.h" #include "base/value_object.h" #include "data_type.h" @@ -40,9 +39,7 @@ class SsaLivenessAnalysis; */ class RegisterAllocationResolver : ValueObject { public: - RegisterAllocationResolver(ArenaAllocator* allocator, - CodeGenerator* codegen, - const SsaLivenessAnalysis& liveness); + RegisterAllocationResolver(CodeGenerator* codegen, const SsaLivenessAnalysis& liveness); void 
Resolve(ArrayRef<HInstruction* const> safepoints, size_t reserved_out_slots, // Includes slot(s) for the art method. @@ -51,7 +48,7 @@ class RegisterAllocationResolver : ValueObject { size_t float_spill_slots, size_t double_spill_slots, size_t catch_phi_spill_slots, - const ArenaVector<LiveInterval*>& temp_intervals); + ArrayRef<LiveInterval* const> temp_intervals); private: // Update live registers of safepoint location summary. diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc index c3b33e29d7..ece9904426 100644 --- a/compiler/optimizing/register_allocator.cc +++ b/compiler/optimizing/register_allocator.cc @@ -19,6 +19,8 @@ #include <iostream> #include <sstream> +#include "base/scoped_arena_allocator.h" +#include "base/scoped_arena_containers.h" #include "base/bit_vector-inl.h" #include "code_generator.h" #include "register_allocator_graph_color.h" @@ -27,22 +29,24 @@ namespace art { -RegisterAllocator::RegisterAllocator(ArenaAllocator* allocator, +RegisterAllocator::RegisterAllocator(ScopedArenaAllocator* allocator, CodeGenerator* codegen, const SsaLivenessAnalysis& liveness) : allocator_(allocator), codegen_(codegen), liveness_(liveness) {} -RegisterAllocator* RegisterAllocator::Create(ArenaAllocator* allocator, - CodeGenerator* codegen, - const SsaLivenessAnalysis& analysis, - Strategy strategy) { +std::unique_ptr<RegisterAllocator> RegisterAllocator::Create(ScopedArenaAllocator* allocator, + CodeGenerator* codegen, + const SsaLivenessAnalysis& analysis, + Strategy strategy) { switch (strategy) { case kRegisterAllocatorLinearScan: - return new (allocator) RegisterAllocatorLinearScan(allocator, codegen, analysis); + return std::unique_ptr<RegisterAllocator>( + new (allocator) RegisterAllocatorLinearScan(allocator, codegen, analysis)); case kRegisterAllocatorGraphColor: - return new (allocator) RegisterAllocatorGraphColor(allocator, codegen, analysis); + return std::unique_ptr<RegisterAllocator>( + new (allocator) 
RegisterAllocatorGraphColor(allocator, codegen, analysis)); default: LOG(FATAL) << "Invalid register allocation strategy: " << strategy; UNREACHABLE(); @@ -87,18 +91,18 @@ class AllRangesIterator : public ValueObject { DISALLOW_COPY_AND_ASSIGN(AllRangesIterator); }; -bool RegisterAllocator::ValidateIntervals(const ArenaVector<LiveInterval*>& intervals, +bool RegisterAllocator::ValidateIntervals(ArrayRef<LiveInterval* const> intervals, size_t number_of_spill_slots, size_t number_of_out_slots, const CodeGenerator& codegen, - ArenaAllocator* allocator, bool processing_core_registers, bool log_fatal_on_failure) { size_t number_of_registers = processing_core_registers ? codegen.GetNumberOfCoreRegisters() : codegen.GetNumberOfFloatingPointRegisters(); - ArenaVector<ArenaBitVector*> liveness_of_values( - allocator->Adapter(kArenaAllocRegisterAllocatorValidate)); + ScopedArenaAllocator allocator(codegen.GetGraph()->GetArenaStack()); + ScopedArenaVector<ArenaBitVector*> liveness_of_values( + allocator.Adapter(kArenaAllocRegisterAllocatorValidate)); liveness_of_values.reserve(number_of_registers + number_of_spill_slots); size_t max_end = 0u; @@ -112,7 +116,8 @@ bool RegisterAllocator::ValidateIntervals(const ArenaVector<LiveInterval*>& inte // allocated will populate the associated bit vector based on its live ranges. 
for (size_t i = 0; i < number_of_registers + number_of_spill_slots; ++i) { liveness_of_values.push_back( - ArenaBitVector::Create(allocator, max_end, false, kArenaAllocRegisterAllocatorValidate)); + ArenaBitVector::Create(&allocator, max_end, false, kArenaAllocRegisterAllocatorValidate)); + liveness_of_values.back()->ClearAllBits(); } for (LiveInterval* start_interval : intervals) { diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h index 4375d6851a..eaeec3b261 100644 --- a/compiler/optimizing/register_allocator.h +++ b/compiler/optimizing/register_allocator.h @@ -18,7 +18,7 @@ #define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_H_ #include "arch/instruction_set.h" -#include "base/arena_containers.h" +#include "base/array_ref.h" #include "base/arena_object.h" #include "base/macros.h" @@ -36,7 +36,7 @@ class SsaLivenessAnalysis; /** * Base class for any register allocator. */ -class RegisterAllocator : public ArenaObject<kArenaAllocRegisterAllocator> { +class RegisterAllocator : public DeletableArenaObject<kArenaAllocRegisterAllocator> { public: enum Strategy { kRegisterAllocatorLinearScan, @@ -45,10 +45,10 @@ class RegisterAllocator : public ArenaObject<kArenaAllocRegisterAllocator> { static constexpr Strategy kRegisterAllocatorDefault = kRegisterAllocatorLinearScan; - static RegisterAllocator* Create(ArenaAllocator* allocator, - CodeGenerator* codegen, - const SsaLivenessAnalysis& analysis, - Strategy strategy = kRegisterAllocatorDefault); + static std::unique_ptr<RegisterAllocator> Create(ScopedArenaAllocator* allocator, + CodeGenerator* codegen, + const SsaLivenessAnalysis& analysis, + Strategy strategy = kRegisterAllocatorDefault); virtual ~RegisterAllocator() = default; @@ -64,18 +64,17 @@ class RegisterAllocator : public ArenaObject<kArenaAllocRegisterAllocator> { InstructionSet instruction_set); // Verifies that live intervals do not conflict. Used by unit testing. 
- static bool ValidateIntervals(const ArenaVector<LiveInterval*>& intervals, + static bool ValidateIntervals(ArrayRef<LiveInterval* const> intervals, size_t number_of_spill_slots, size_t number_of_out_slots, const CodeGenerator& codegen, - ArenaAllocator* allocator, bool processing_core_registers, bool log_fatal_on_failure); static constexpr const char* kRegisterAllocatorPassName = "register"; protected: - RegisterAllocator(ArenaAllocator* allocator, + RegisterAllocator(ScopedArenaAllocator* allocator, CodeGenerator* codegen, const SsaLivenessAnalysis& analysis); @@ -88,7 +87,7 @@ class RegisterAllocator : public ArenaObject<kArenaAllocRegisterAllocator> { // to find an optimal split position. LiveInterval* SplitBetween(LiveInterval* interval, size_t from, size_t to); - ArenaAllocator* const allocator_; + ScopedArenaAllocator* allocator_; CodeGenerator* const codegen_; const SsaLivenessAnalysis& liveness_; }; diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc index 33df607831..ad5248e982 100644 --- a/compiler/optimizing/register_allocator_graph_color.cc +++ b/compiler/optimizing/register_allocator_graph_color.cc @@ -217,13 +217,12 @@ static float ComputeSpillWeight(LiveInterval* interval, const SsaLivenessAnalysi // and thus whether it is safe to prune it from the interference graph early on. class InterferenceNode : public ArenaObject<kArenaAllocRegisterAllocator> { public: - InterferenceNode(ArenaAllocator* allocator, - LiveInterval* interval, + InterferenceNode(LiveInterval* interval, const SsaLivenessAnalysis& liveness) : stage(NodeStage::kInitial), interval_(interval), - adjacent_nodes_(allocator->Adapter(kArenaAllocRegisterAllocator)), - coalesce_opportunities_(allocator->Adapter(kArenaAllocRegisterAllocator)), + adjacent_nodes_(nullptr), + coalesce_opportunities_(nullptr), out_degree_(interval->HasRegister() ? 
std::numeric_limits<size_t>::max() : 0), alias_(this), spill_weight_(ComputeSpillWeight(interval, liveness)), @@ -232,21 +231,26 @@ class InterferenceNode : public ArenaObject<kArenaAllocRegisterAllocator> { DCHECK(!interval->IsHighInterval()) << "Pair nodes should be represented by the low interval"; } - void AddInterference(InterferenceNode* other, bool guaranteed_not_interfering_yet) { + void AddInterference(InterferenceNode* other, + bool guaranteed_not_interfering_yet, + ScopedArenaDeque<ScopedArenaVector<InterferenceNode*>>* storage) { DCHECK(!IsPrecolored()) << "To save memory, fixed nodes should not have outgoing interferences"; DCHECK_NE(this, other) << "Should not create self loops in the interference graph"; DCHECK_EQ(this, alias_) << "Should not add interferences to a node that aliases another"; DCHECK_NE(stage, NodeStage::kPruned); DCHECK_NE(other->stage, NodeStage::kPruned); + if (adjacent_nodes_ == nullptr) { + ScopedArenaVector<InterferenceNode*>::allocator_type adapter(storage->get_allocator()); + storage->emplace_back(adapter); + adjacent_nodes_ = &storage->back(); + } if (guaranteed_not_interfering_yet) { - DCHECK(std::find(adjacent_nodes_.begin(), adjacent_nodes_.end(), other) - == adjacent_nodes_.end()); - adjacent_nodes_.push_back(other); + DCHECK(!ContainsElement(GetAdjacentNodes(), other)); + adjacent_nodes_->push_back(other); out_degree_ += EdgeWeightWith(other); } else { - auto it = std::find(adjacent_nodes_.begin(), adjacent_nodes_.end(), other); - if (it == adjacent_nodes_.end()) { - adjacent_nodes_.push_back(other); + if (!ContainsElement(GetAdjacentNodes(), other)) { + adjacent_nodes_->push_back(other); out_degree_ += EdgeWeightWith(other); } } @@ -255,26 +259,29 @@ class InterferenceNode : public ArenaObject<kArenaAllocRegisterAllocator> { void RemoveInterference(InterferenceNode* other) { DCHECK_EQ(this, alias_) << "Should not remove interferences from a coalesced node"; DCHECK_EQ(other->stage, NodeStage::kPruned) << "Should only 
remove interferences when pruning"; - auto it = std::find(adjacent_nodes_.begin(), adjacent_nodes_.end(), other); - if (it != adjacent_nodes_.end()) { - adjacent_nodes_.erase(it); - out_degree_ -= EdgeWeightWith(other); + if (adjacent_nodes_ != nullptr) { + auto it = std::find(adjacent_nodes_->begin(), adjacent_nodes_->end(), other); + if (it != adjacent_nodes_->end()) { + adjacent_nodes_->erase(it); + out_degree_ -= EdgeWeightWith(other); + } } } bool ContainsInterference(InterferenceNode* other) const { DCHECK(!IsPrecolored()) << "Should not query fixed nodes for interferences"; DCHECK_EQ(this, alias_) << "Should not query a coalesced node for interferences"; - auto it = std::find(adjacent_nodes_.begin(), adjacent_nodes_.end(), other); - return it != adjacent_nodes_.end(); + return ContainsElement(GetAdjacentNodes(), other); } LiveInterval* GetInterval() const { return interval_; } - const ArenaVector<InterferenceNode*>& GetAdjacentNodes() const { - return adjacent_nodes_; + ArrayRef<InterferenceNode*> GetAdjacentNodes() const { + return adjacent_nodes_ != nullptr + ? 
ArrayRef<InterferenceNode*>(*adjacent_nodes_) + : ArrayRef<InterferenceNode*>(); } size_t GetOutDegree() const { @@ -283,16 +290,22 @@ class InterferenceNode : public ArenaObject<kArenaAllocRegisterAllocator> { return out_degree_; } - void AddCoalesceOpportunity(CoalesceOpportunity* opportunity) { - coalesce_opportunities_.push_back(opportunity); + void AddCoalesceOpportunity(CoalesceOpportunity* opportunity, + ScopedArenaDeque<ScopedArenaVector<CoalesceOpportunity*>>* storage) { + if (coalesce_opportunities_ == nullptr) { + ScopedArenaVector<CoalesceOpportunity*>::allocator_type adapter(storage->get_allocator()); + storage->emplace_back(adapter); + coalesce_opportunities_ = &storage->back(); + } + coalesce_opportunities_->push_back(opportunity); } void ClearCoalesceOpportunities() { - coalesce_opportunities_.clear(); + coalesce_opportunities_ = nullptr; } bool IsMoveRelated() const { - for (CoalesceOpportunity* opportunity : coalesce_opportunities_) { + for (CoalesceOpportunity* opportunity : GetCoalesceOpportunities()) { if (opportunity->stage == CoalesceStage::kWorklist || opportunity->stage == CoalesceStage::kActive) { return true; @@ -325,8 +338,10 @@ class InterferenceNode : public ArenaObject<kArenaAllocRegisterAllocator> { return alias_; } - const ArenaVector<CoalesceOpportunity*>& GetCoalesceOpportunities() const { - return coalesce_opportunities_; + ArrayRef<CoalesceOpportunity*> GetCoalesceOpportunities() const { + return coalesce_opportunities_ != nullptr + ? ArrayRef<CoalesceOpportunity*>(*coalesce_opportunities_) + : ArrayRef<CoalesceOpportunity*>(); } float GetSpillWeight() const { @@ -361,10 +376,10 @@ class InterferenceNode : public ArenaObject<kArenaAllocRegisterAllocator> { // All nodes interfering with this one. // We use an unsorted vector as a set, since a tree or hash set is too heavy for the // set sizes that we encounter. Using a vector leads to much better performance. 
- ArenaVector<InterferenceNode*> adjacent_nodes_; + ScopedArenaVector<InterferenceNode*>* adjacent_nodes_; // Owned by ColoringIteration. // Interference nodes that this node should be coalesced with to reduce moves. - ArenaVector<CoalesceOpportunity*> coalesce_opportunities_; + ScopedArenaVector<CoalesceOpportunity*>* coalesce_opportunities_; // Owned by ColoringIteration. // The maximum number of colors with which this node could interfere. This could be more than // the number of adjacent nodes if this is a pair node, or if some adjacent nodes are pair nodes. @@ -416,7 +431,7 @@ static bool HasGreaterNodePriority(const InterferenceNode* lhs, class ColoringIteration { public: ColoringIteration(RegisterAllocatorGraphColor* register_allocator, - ArenaAllocator* allocator, + ScopedArenaAllocator* allocator, bool processing_core_regs, size_t num_regs) : register_allocator_(register_allocator), @@ -430,15 +445,17 @@ class ColoringIteration { freeze_worklist_(allocator->Adapter(kArenaAllocRegisterAllocator)), spill_worklist_(HasGreaterNodePriority, allocator->Adapter(kArenaAllocRegisterAllocator)), coalesce_worklist_(CoalesceOpportunity::CmpPriority, - allocator->Adapter(kArenaAllocRegisterAllocator)) {} + allocator->Adapter(kArenaAllocRegisterAllocator)), + adjacent_nodes_links_(allocator->Adapter(kArenaAllocRegisterAllocator)), + coalesce_opportunities_links_(allocator->Adapter(kArenaAllocRegisterAllocator)) {} // Use the intervals collected from instructions to construct an // interference graph mapping intervals to adjacency lists. // Also, collect synthesized safepoint nodes, used to keep // track of live intervals across safepoints. // TODO: Should build safepoints elsewhere. 
- void BuildInterferenceGraph(const ArenaVector<LiveInterval*>& intervals, - const ArenaVector<InterferenceNode*>& physical_nodes); + void BuildInterferenceGraph(const ScopedArenaVector<LiveInterval*>& intervals, + const ScopedArenaVector<InterferenceNode*>& physical_nodes); // Add coalesce opportunities to interference nodes. void FindCoalesceOpportunities(); @@ -456,8 +473,8 @@ class ColoringIteration { // Return prunable nodes. // The register allocator will need to access prunable nodes after coloring // in order to tell the code generator which registers have been assigned. - const ArenaVector<InterferenceNode*>& GetPrunableNodes() const { - return prunable_nodes_; + ArrayRef<InterferenceNode* const> GetPrunableNodes() const { + return ArrayRef<InterferenceNode* const>(prunable_nodes_); } private: @@ -503,38 +520,46 @@ class ColoringIteration { // needed to split intervals and assign spill slots. RegisterAllocatorGraphColor* register_allocator_; - // An arena allocator used for a single graph coloring attempt. - ArenaAllocator* allocator_; + // A scoped arena allocator used for a single graph coloring attempt. + ScopedArenaAllocator* allocator_; const bool processing_core_regs_; const size_t num_regs_; // A map from live intervals to interference nodes. - ArenaHashMap<LiveInterval*, InterferenceNode*> interval_node_map_; + ScopedArenaHashMap<LiveInterval*, InterferenceNode*> interval_node_map_; // Uncolored nodes that should be pruned from the interference graph. - ArenaVector<InterferenceNode*> prunable_nodes_; + ScopedArenaVector<InterferenceNode*> prunable_nodes_; // A stack of nodes pruned from the interference graph, waiting to be pruned. - ArenaStdStack<InterferenceNode*> pruned_nodes_; + ScopedArenaStdStack<InterferenceNode*> pruned_nodes_; // A queue containing low degree, non-move-related nodes that can pruned immediately. 
- ArenaDeque<InterferenceNode*> simplify_worklist_; + ScopedArenaDeque<InterferenceNode*> simplify_worklist_; // A queue containing low degree, move-related nodes. - ArenaDeque<InterferenceNode*> freeze_worklist_; + ScopedArenaDeque<InterferenceNode*> freeze_worklist_; // A queue containing high degree nodes. // If we have to prune from the spill worklist, we cannot guarantee // the pruned node a color, so we order the worklist by priority. - ArenaPriorityQueue<InterferenceNode*, decltype(&HasGreaterNodePriority)> spill_worklist_; + ScopedArenaPriorityQueue<InterferenceNode*, decltype(&HasGreaterNodePriority)> spill_worklist_; // A queue containing coalesce opportunities. // We order the coalesce worklist by priority, since some coalesce opportunities (e.g., those // inside of loops) are more important than others. - ArenaPriorityQueue<CoalesceOpportunity*, - decltype(&CoalesceOpportunity::CmpPriority)> coalesce_worklist_; + ScopedArenaPriorityQueue<CoalesceOpportunity*, + decltype(&CoalesceOpportunity::CmpPriority)> coalesce_worklist_; + + // Storage for links to adjacent nodes for interference nodes. + // Using std::deque so that elements do not move when adding new ones. + ScopedArenaDeque<ScopedArenaVector<InterferenceNode*>> adjacent_nodes_links_; + + // Storage for links to coalesce opportunities for interference nodes. + // Using std::deque so that elements do not move when adding new ones. 
+ ScopedArenaDeque<ScopedArenaVector<CoalesceOpportunity*>> coalesce_opportunities_links_; DISALLOW_COPY_AND_ASSIGN(ColoringIteration); }; @@ -547,7 +572,7 @@ static size_t ComputeReservedArtMethodSlots(const CodeGenerator& codegen) { return static_cast<size_t>(InstructionSetPointerSize(codegen.GetInstructionSet())) / kVRegSize; } -RegisterAllocatorGraphColor::RegisterAllocatorGraphColor(ArenaAllocator* allocator, +RegisterAllocatorGraphColor::RegisterAllocatorGraphColor(ScopedArenaAllocator* allocator, CodeGenerator* codegen, const SsaLivenessAnalysis& liveness, bool iterative_move_coalescing) @@ -574,8 +599,7 @@ RegisterAllocatorGraphColor::RegisterAllocatorGraphColor(ArenaAllocator* allocat physical_core_nodes_.resize(codegen_->GetNumberOfCoreRegisters(), nullptr); for (size_t i = 0; i < codegen_->GetNumberOfCoreRegisters(); ++i) { LiveInterval* interval = LiveInterval::MakeFixedInterval(allocator_, i, DataType::Type::kInt32); - physical_core_nodes_[i] = - new (allocator_) InterferenceNode(allocator_, interval, liveness); + physical_core_nodes_[i] = new (allocator_) InterferenceNode(interval, liveness); physical_core_nodes_[i]->stage = NodeStage::kPrecolored; core_intervals_.push_back(interval); if (codegen_->IsBlockedCoreRegister(i)) { @@ -587,8 +611,7 @@ RegisterAllocatorGraphColor::RegisterAllocatorGraphColor(ArenaAllocator* allocat for (size_t i = 0; i < codegen_->GetNumberOfFloatingPointRegisters(); ++i) { LiveInterval* interval = LiveInterval::MakeFixedInterval(allocator_, i, DataType::Type::kFloat32); - physical_fp_nodes_[i] = - new (allocator_) InterferenceNode(allocator_, interval, liveness); + physical_fp_nodes_[i] = new (allocator_) InterferenceNode(interval, liveness); physical_fp_nodes_[i]->stage = NodeStage::kPrecolored; fp_intervals_.push_back(interval); if (codegen_->IsBlockedFloatingPointRegister(i)) { @@ -597,12 +620,14 @@ RegisterAllocatorGraphColor::RegisterAllocatorGraphColor(ArenaAllocator* allocat } } 
+RegisterAllocatorGraphColor::~RegisterAllocatorGraphColor() {} + void RegisterAllocatorGraphColor::AllocateRegisters() { // (1) Collect and prepare live intervals. ProcessInstructions(); for (bool processing_core_regs : {true, false}) { - ArenaVector<LiveInterval*>& intervals = processing_core_regs + ScopedArenaVector<LiveInterval*>& intervals = processing_core_regs ? core_intervals_ : fp_intervals_; size_t num_registers = processing_core_regs @@ -619,17 +644,15 @@ void RegisterAllocatorGraphColor::AllocateRegisters() { << "should be prioritized over long ones, because they cannot be split further.)"; // Many data structures are cleared between graph coloring attempts, so we reduce - // total memory usage by using a new arena allocator for each attempt. - ArenaAllocator coloring_attempt_allocator(allocator_->GetArenaPool()); + // total memory usage by using a new scoped arena allocator for each attempt. + ScopedArenaAllocator coloring_attempt_allocator(allocator_->GetArenaStack()); ColoringIteration iteration(this, &coloring_attempt_allocator, processing_core_regs, num_registers); - // (2) Build the interference graph. Also gather safepoints. - ArenaVector<InterferenceNode*> safepoints( - coloring_attempt_allocator.Adapter(kArenaAllocRegisterAllocator)); - ArenaVector<InterferenceNode*>& physical_nodes = processing_core_regs + // (2) Build the interference graph. + ScopedArenaVector<InterferenceNode*>& physical_nodes = processing_core_regs ? physical_core_nodes_ : physical_fp_nodes_; iteration.BuildInterferenceGraph(intervals, physical_nodes); @@ -691,7 +714,7 @@ void RegisterAllocatorGraphColor::AllocateRegisters() { } // for processing_core_instructions // (6) Resolve locations and deconstruct SSA form. 
- RegisterAllocationResolver(allocator_, codegen_, liveness_) + RegisterAllocationResolver(codegen_, liveness_) .Resolve(ArrayRef<HInstruction* const>(safepoints_), reserved_art_method_slots_ + reserved_out_slots_, num_int_spill_slots_, @@ -699,7 +722,7 @@ void RegisterAllocatorGraphColor::AllocateRegisters() { num_float_spill_slots_, num_double_spill_slots_, catch_phi_spill_slot_counter_, - temp_intervals_); + ArrayRef<LiveInterval* const>(temp_intervals_)); if (kIsDebugBuild) { Validate(/*log_fatal_on_failure*/ true); @@ -708,8 +731,9 @@ void RegisterAllocatorGraphColor::AllocateRegisters() { bool RegisterAllocatorGraphColor::Validate(bool log_fatal_on_failure) { for (bool processing_core_regs : {true, false}) { - ArenaVector<LiveInterval*> intervals( - allocator_->Adapter(kArenaAllocRegisterAllocatorValidate)); + ScopedArenaAllocator allocator(allocator_->GetArenaStack()); + ScopedArenaVector<LiveInterval*> intervals( + allocator.Adapter(kArenaAllocRegisterAllocatorValidate)); for (size_t i = 0; i < liveness_.GetNumberOfSsaValues(); ++i) { HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i); LiveInterval* interval = instruction->GetLiveInterval(); @@ -718,7 +742,7 @@ bool RegisterAllocatorGraphColor::Validate(bool log_fatal_on_failure) { } } - ArenaVector<InterferenceNode*>& physical_nodes = processing_core_regs + ScopedArenaVector<InterferenceNode*>& physical_nodes = processing_core_regs ? 
physical_core_nodes_ : physical_fp_nodes_; for (InterferenceNode* fixed : physical_nodes) { @@ -742,11 +766,10 @@ bool RegisterAllocatorGraphColor::Validate(bool log_fatal_on_failure) { + num_float_spill_slots_ + num_double_spill_slots_ + catch_phi_spill_slot_counter_; - bool ok = ValidateIntervals(intervals, + bool ok = ValidateIntervals(ArrayRef<LiveInterval* const>(intervals), spill_slots, reserved_art_method_slots_ + reserved_out_slots_, *codegen_, - allocator_, processing_core_regs, log_fatal_on_failure); if (!ok) { @@ -825,7 +848,7 @@ void RegisterAllocatorGraphColor::ProcessInstruction(HInstruction* instruction) CheckForFixedOutput(instruction); AllocateSpillSlotForCatchPhi(instruction); - ArenaVector<LiveInterval*>& intervals = IsCoreInterval(interval) + ScopedArenaVector<LiveInterval*>& intervals = IsCoreInterval(interval) ? core_intervals_ : fp_intervals_; if (interval->HasSpillSlot() || instruction->IsConstant()) { @@ -1075,11 +1098,12 @@ void ColoringIteration::AddPotentialInterference(InterferenceNode* from, } else if (to->IsPrecolored()) { // It is important that only a single node represents a given fixed register in the // interference graph. We retrieve that node here. - const ArenaVector<InterferenceNode*>& physical_nodes = to->GetInterval()->IsFloatingPoint() - ? register_allocator_->physical_fp_nodes_ - : register_allocator_->physical_core_nodes_; + const ScopedArenaVector<InterferenceNode*>& physical_nodes = + to->GetInterval()->IsFloatingPoint() ? 
register_allocator_->physical_fp_nodes_ + : register_allocator_->physical_core_nodes_; InterferenceNode* physical_node = physical_nodes[to->GetInterval()->GetRegister()]; - from->AddInterference(physical_node, /*guaranteed_not_interfering_yet*/ false); + from->AddInterference( + physical_node, /*guaranteed_not_interfering_yet*/ false, &adjacent_nodes_links_); DCHECK_EQ(to->GetInterval()->GetRegister(), physical_node->GetInterval()->GetRegister()); DCHECK_EQ(to->GetAlias(), physical_node) << "Fixed nodes should alias the canonical fixed node"; @@ -1097,11 +1121,12 @@ void ColoringIteration::AddPotentialInterference(InterferenceNode* from, physical_nodes[to->GetInterval()->GetHighInterval()->GetRegister()]; DCHECK_EQ(to->GetInterval()->GetHighInterval()->GetRegister(), high_node->GetInterval()->GetRegister()); - from->AddInterference(high_node, /*guaranteed_not_interfering_yet*/ false); + from->AddInterference( + high_node, /*guaranteed_not_interfering_yet*/ false, &adjacent_nodes_links_); } } else { // Standard interference between two uncolored nodes. - from->AddInterference(to, guaranteed_not_interfering_yet); + from->AddInterference(to, guaranteed_not_interfering_yet, &adjacent_nodes_links_); } if (both_directions) { @@ -1156,8 +1181,8 @@ static bool CheckInputOutputCanOverlap(InterferenceNode* in_node, InterferenceNo } void ColoringIteration::BuildInterferenceGraph( - const ArenaVector<LiveInterval*>& intervals, - const ArenaVector<InterferenceNode*>& physical_nodes) { + const ScopedArenaVector<LiveInterval*>& intervals, + const ScopedArenaVector<InterferenceNode*>& physical_nodes) { DCHECK(interval_node_map_.Empty() && prunable_nodes_.empty()); // Build the interference graph efficiently by ordering range endpoints // by position and doing a linear sweep to find interferences. (That is, we @@ -1171,7 +1196,7 @@ void ColoringIteration::BuildInterferenceGraph( // // For simplicity, we create a tuple for each endpoint, and then sort the tuples. 
// Tuple contents: (position, is_range_beginning, node). - ArenaVector<std::tuple<size_t, bool, InterferenceNode*>> range_endpoints( + ScopedArenaVector<std::tuple<size_t, bool, InterferenceNode*>> range_endpoints( allocator_->Adapter(kArenaAllocRegisterAllocator)); // We reserve plenty of space to avoid excessive copying. @@ -1181,8 +1206,8 @@ void ColoringIteration::BuildInterferenceGraph( for (LiveInterval* sibling = parent; sibling != nullptr; sibling = sibling->GetNextSibling()) { LiveRange* range = sibling->GetFirstRange(); if (range != nullptr) { - InterferenceNode* node = new (allocator_) InterferenceNode( - allocator_, sibling, register_allocator_->liveness_); + InterferenceNode* node = + new (allocator_) InterferenceNode(sibling, register_allocator_->liveness_); interval_node_map_.Insert(std::make_pair(sibling, node)); if (sibling->HasRegister()) { @@ -1217,8 +1242,7 @@ void ColoringIteration::BuildInterferenceGraph( }); // Nodes live at the current position in the linear sweep. - ArenaVector<InterferenceNode*> live( - allocator_->Adapter(kArenaAllocRegisterAllocator)); + ScopedArenaVector<InterferenceNode*> live(allocator_->Adapter(kArenaAllocRegisterAllocator)); // Linear sweep. When we encounter the beginning of a range, we add the corresponding node to the // live set. 
When we encounter the end of a range, we remove the corresponding node @@ -1261,8 +1285,8 @@ void ColoringIteration::CreateCoalesceOpportunity(InterferenceNode* a, << "Nodes of different memory widths should never be coalesced"; CoalesceOpportunity* opportunity = new (allocator_) CoalesceOpportunity(a, b, kind, position, register_allocator_->liveness_); - a->AddCoalesceOpportunity(opportunity); - b->AddCoalesceOpportunity(opportunity); + a->AddCoalesceOpportunity(opportunity, &coalesce_opportunities_links_); + b->AddCoalesceOpportunity(opportunity, &coalesce_opportunities_links_); coalesce_worklist_.push(opportunity); } @@ -1332,7 +1356,7 @@ void ColoringIteration::FindCoalesceOpportunities() { // Coalesce phi inputs with the corresponding output. HInstruction* defined_by = interval->GetDefinedBy(); if (defined_by != nullptr && defined_by->IsPhi()) { - const ArenaVector<HBasicBlock*>& predecessors = defined_by->GetBlock()->GetPredecessors(); + ArrayRef<HBasicBlock* const> predecessors(defined_by->GetBlock()->GetPredecessors()); HInputsRef inputs = defined_by->GetInputs(); for (size_t i = 0, e = inputs.size(); i < e; ++i) { @@ -1675,7 +1699,7 @@ void ColoringIteration::Combine(InterferenceNode* from, // Add coalesce opportunities. for (CoalesceOpportunity* opportunity : from->GetCoalesceOpportunities()) { if (opportunity->stage != CoalesceStage::kDefunct) { - into->AddCoalesceOpportunity(opportunity); + into->AddCoalesceOpportunity(opportunity, &coalesce_opportunities_links_); } } EnableCoalesceOpportunities(from); @@ -1729,7 +1753,7 @@ void ColoringIteration::Coalesce(CoalesceOpportunity* opportunity) { // Build a mask with a bit set for each register assigned to some // interval in `intervals`. 
template <typename Container> -static std::bitset<kMaxNumRegs> BuildConflictMask(Container& intervals) { +static std::bitset<kMaxNumRegs> BuildConflictMask(const Container& intervals) { std::bitset<kMaxNumRegs> conflict_mask; for (InterferenceNode* adjacent : intervals) { LiveInterval* conflicting = adjacent->GetInterval(); @@ -1765,7 +1789,7 @@ static size_t FindFirstZeroInConflictMask(std::bitset<kMaxNumRegs> conflict_mask bool ColoringIteration::ColorInterferenceGraph() { DCHECK_LE(num_regs_, kMaxNumRegs) << "kMaxNumRegs is too small"; - ArenaVector<LiveInterval*> colored_intervals( + ScopedArenaVector<LiveInterval*> colored_intervals( allocator_->Adapter(kArenaAllocRegisterAllocator)); bool successful = true; @@ -1888,16 +1912,18 @@ bool ColoringIteration::ColorInterferenceGraph() { return successful; } -void RegisterAllocatorGraphColor::AllocateSpillSlots(const ArenaVector<InterferenceNode*>& nodes) { +void RegisterAllocatorGraphColor::AllocateSpillSlots(ArrayRef<InterferenceNode* const> nodes) { // The register allocation resolver will organize the stack based on value type, // so we assign stack slots for each value type separately. - ArenaVector<LiveInterval*> double_intervals(allocator_->Adapter(kArenaAllocRegisterAllocator)); - ArenaVector<LiveInterval*> long_intervals(allocator_->Adapter(kArenaAllocRegisterAllocator)); - ArenaVector<LiveInterval*> float_intervals(allocator_->Adapter(kArenaAllocRegisterAllocator)); - ArenaVector<LiveInterval*> int_intervals(allocator_->Adapter(kArenaAllocRegisterAllocator)); + ScopedArenaAllocator allocator(allocator_->GetArenaStack()); + ScopedArenaAllocatorAdapter<void> adapter = allocator.Adapter(kArenaAllocRegisterAllocator); + ScopedArenaVector<LiveInterval*> double_intervals(adapter); + ScopedArenaVector<LiveInterval*> long_intervals(adapter); + ScopedArenaVector<LiveInterval*> float_intervals(adapter); + ScopedArenaVector<LiveInterval*> int_intervals(adapter); // The set of parent intervals already handled. 
- ArenaSet<LiveInterval*> seen(allocator_->Adapter(kArenaAllocRegisterAllocator)); + ScopedArenaSet<LiveInterval*> seen(adapter); // Find nodes that need spill slots. for (InterferenceNode* node : nodes) { @@ -1954,23 +1980,24 @@ void RegisterAllocatorGraphColor::AllocateSpillSlots(const ArenaVector<Interfere } // Color spill slots for each value type. - ColorSpillSlots(&double_intervals, &num_double_spill_slots_); - ColorSpillSlots(&long_intervals, &num_long_spill_slots_); - ColorSpillSlots(&float_intervals, &num_float_spill_slots_); - ColorSpillSlots(&int_intervals, &num_int_spill_slots_); + ColorSpillSlots(ArrayRef<LiveInterval* const>(double_intervals), &num_double_spill_slots_); + ColorSpillSlots(ArrayRef<LiveInterval* const>(long_intervals), &num_long_spill_slots_); + ColorSpillSlots(ArrayRef<LiveInterval* const>(float_intervals), &num_float_spill_slots_); + ColorSpillSlots(ArrayRef<LiveInterval* const>(int_intervals), &num_int_spill_slots_); } -void RegisterAllocatorGraphColor::ColorSpillSlots(ArenaVector<LiveInterval*>* intervals, - size_t* num_stack_slots_used) { +void RegisterAllocatorGraphColor::ColorSpillSlots(ArrayRef<LiveInterval* const> intervals, + /* out */ size_t* num_stack_slots_used) { // We cannot use the original interference graph here because spill slots are assigned to // all of the siblings of an interval, whereas an interference node represents only a single // sibling. So, we assign spill slots linear-scan-style by sorting all the interval endpoints // by position, and assigning the lowest spill slot available when we encounter an interval // beginning. We ignore lifetime holes for simplicity. 
- ArenaVector<std::tuple<size_t, bool, LiveInterval*>> interval_endpoints( - allocator_->Adapter(kArenaAllocRegisterAllocator)); + ScopedArenaAllocator allocator(allocator_->GetArenaStack()); + ScopedArenaVector<std::tuple<size_t, bool, LiveInterval*>> interval_endpoints( + allocator.Adapter(kArenaAllocRegisterAllocator)); - for (LiveInterval* parent_interval : *intervals) { + for (LiveInterval* parent_interval : intervals) { DCHECK(parent_interval->IsParent()); DCHECK(!parent_interval->HasSpillSlot()); size_t start = parent_interval->GetStart(); @@ -1990,7 +2017,7 @@ void RegisterAllocatorGraphColor::ColorSpillSlots(ArenaVector<LiveInterval*>* in < std::tie(std::get<0>(rhs), std::get<1>(rhs)); }); - ArenaBitVector taken(allocator_, 0, true); + ArenaBitVector taken(&allocator, 0, true, kArenaAllocRegisterAllocator); for (auto it = interval_endpoints.begin(), end = interval_endpoints.end(); it != end; ++it) { // Extract information from the current tuple. LiveInterval* parent_interval; diff --git a/compiler/optimizing/register_allocator_graph_color.h b/compiler/optimizing/register_allocator_graph_color.h index 3f6d674905..3072c92e0f 100644 --- a/compiler/optimizing/register_allocator_graph_color.h +++ b/compiler/optimizing/register_allocator_graph_color.h @@ -18,9 +18,10 @@ #define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_GRAPH_COLOR_H_ #include "arch/instruction_set.h" -#include "base/arena_containers.h" #include "base/arena_object.h" +#include "base/array_ref.h" #include "base/macros.h" +#include "base/scoped_arena_containers.h" #include "register_allocator.h" namespace art { @@ -85,11 +86,11 @@ enum class CoalesceKind; */ class RegisterAllocatorGraphColor : public RegisterAllocator { public: - RegisterAllocatorGraphColor(ArenaAllocator* allocator, + RegisterAllocatorGraphColor(ScopedArenaAllocator* allocator, CodeGenerator* codegen, const SsaLivenessAnalysis& analysis, bool iterative_move_coalescing = true); - ~RegisterAllocatorGraphColor() OVERRIDE {} + 
~RegisterAllocatorGraphColor() OVERRIDE; void AllocateRegisters() OVERRIDE; @@ -141,11 +142,10 @@ class RegisterAllocatorGraphColor : public RegisterAllocator { // Assigns stack slots to a list of intervals, ensuring that interfering intervals are not // assigned the same stack slot. - void ColorSpillSlots(ArenaVector<LiveInterval*>* nodes, - size_t* num_stack_slots_used); + void ColorSpillSlots(ArrayRef<LiveInterval* const> nodes, /* out */ size_t* num_stack_slots_used); // Provide stack slots to nodes that need them. - void AllocateSpillSlots(const ArenaVector<InterferenceNode*>& nodes); + void AllocateSpillSlots(ArrayRef<InterferenceNode* const> nodes); // Whether iterative move coalescing should be performed. Iterative move coalescing // improves code quality, but increases compile time. @@ -154,19 +154,19 @@ class RegisterAllocatorGraphColor : public RegisterAllocator { // Live intervals, split by kind (core and floating point). // These should not contain high intervals, as those are represented by // the corresponding low interval throughout register allocation. - ArenaVector<LiveInterval*> core_intervals_; - ArenaVector<LiveInterval*> fp_intervals_; + ScopedArenaVector<LiveInterval*> core_intervals_; + ScopedArenaVector<LiveInterval*> fp_intervals_; // Intervals for temporaries, saved for special handling in the resolution phase. - ArenaVector<LiveInterval*> temp_intervals_; + ScopedArenaVector<LiveInterval*> temp_intervals_; // Safepoints, saved for special handling while processing instructions. - ArenaVector<HInstruction*> safepoints_; + ScopedArenaVector<HInstruction*> safepoints_; // Interference nodes representing specific registers. These are "pre-colored" nodes // in the interference graph. - ArenaVector<InterferenceNode*> physical_core_nodes_; - ArenaVector<InterferenceNode*> physical_fp_nodes_; + ScopedArenaVector<InterferenceNode*> physical_core_nodes_; + ScopedArenaVector<InterferenceNode*> physical_fp_nodes_; // Allocated stack slot counters. 
size_t num_int_spill_slots_; diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc index 9803a7b650..cfe63bd758 100644 --- a/compiler/optimizing/register_allocator_linear_scan.cc +++ b/compiler/optimizing/register_allocator_linear_scan.cc @@ -40,7 +40,7 @@ static bool IsLowOfUnalignedPairInterval(LiveInterval* low) { return GetHighForLowRegister(low->GetRegister()) != low->GetHighInterval()->GetRegister(); } -RegisterAllocatorLinearScan::RegisterAllocatorLinearScan(ArenaAllocator* allocator, +RegisterAllocatorLinearScan::RegisterAllocatorLinearScan(ScopedArenaAllocator* allocator, CodeGenerator* codegen, const SsaLivenessAnalysis& liveness) : RegisterAllocator(allocator, codegen, liveness), @@ -81,6 +81,8 @@ RegisterAllocatorLinearScan::RegisterAllocatorLinearScan(ArenaAllocator* allocat reserved_out_slots_ = ptr_size / kVRegSize + codegen->GetGraph()->GetMaximumNumberOfOutVRegs(); } +RegisterAllocatorLinearScan::~RegisterAllocatorLinearScan() {} + static bool ShouldProcess(bool processing_core_registers, LiveInterval* interval) { if (interval == nullptr) return false; bool is_core_register = (interval->GetType() != DataType::Type::kFloat64) @@ -90,7 +92,7 @@ static bool ShouldProcess(bool processing_core_registers, LiveInterval* interval void RegisterAllocatorLinearScan::AllocateRegisters() { AllocateRegistersInternal(); - RegisterAllocationResolver(allocator_, codegen_, liveness_) + RegisterAllocationResolver(codegen_, liveness_) .Resolve(ArrayRef<HInstruction* const>(safepoints_), reserved_out_slots_, int_spill_slots_.size(), @@ -98,7 +100,7 @@ void RegisterAllocatorLinearScan::AllocateRegisters() { float_spill_slots_.size(), double_spill_slots_.size(), catch_phi_spill_slots_, - temp_intervals_); + ArrayRef<LiveInterval* const>(temp_intervals_)); if (kIsDebugBuild) { processing_core_registers_ = true; @@ -298,7 +300,7 @@ void RegisterAllocatorLinearScan::ProcessInstruction(HInstruction* 
instruction) LiveInterval* current = instruction->GetLiveInterval(); if (current == nullptr) return; - ArenaVector<LiveInterval*>& unhandled = core_register + ScopedArenaVector<LiveInterval*>& unhandled = core_register ? unhandled_core_intervals_ : unhandled_fp_intervals_; @@ -425,7 +427,9 @@ class AllRangesIterator : public ValueObject { bool RegisterAllocatorLinearScan::ValidateInternal(bool log_fatal_on_failure) const { // To simplify unit testing, we eagerly create the array of intervals, and // call the helper method. - ArenaVector<LiveInterval*> intervals(allocator_->Adapter(kArenaAllocRegisterAllocatorValidate)); + ScopedArenaAllocator allocator(allocator_->GetArenaStack()); + ScopedArenaVector<LiveInterval*> intervals( + allocator.Adapter(kArenaAllocRegisterAllocatorValidate)); for (size_t i = 0; i < liveness_.GetNumberOfSsaValues(); ++i) { HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i); if (ShouldProcess(processing_core_registers_, instruction->GetLiveInterval())) { @@ -433,7 +437,7 @@ bool RegisterAllocatorLinearScan::ValidateInternal(bool log_fatal_on_failure) co } } - const ArenaVector<LiveInterval*>* physical_register_intervals = processing_core_registers_ + const ScopedArenaVector<LiveInterval*>* physical_register_intervals = processing_core_registers_ ? 
&physical_core_register_intervals_ : &physical_fp_register_intervals_; for (LiveInterval* fixed : *physical_register_intervals) { @@ -448,8 +452,12 @@ bool RegisterAllocatorLinearScan::ValidateInternal(bool log_fatal_on_failure) co } } - return ValidateIntervals(intervals, GetNumberOfSpillSlots(), reserved_out_slots_, *codegen_, - allocator_, processing_core_registers_, log_fatal_on_failure); + return ValidateIntervals(ArrayRef<LiveInterval* const>(intervals), + GetNumberOfSpillSlots(), + reserved_out_slots_, + *codegen_, + processing_core_registers_, + log_fatal_on_failure); } void RegisterAllocatorLinearScan::DumpInterval(std::ostream& stream, LiveInterval* interval) const { @@ -813,7 +821,7 @@ int RegisterAllocatorLinearScan::FindAvailableRegister(size_t* next_use, LiveInt // Remove interval and its other half if any. Return iterator to the following element. static ArenaVector<LiveInterval*>::iterator RemoveIntervalAndPotentialOtherHalf( - ArenaVector<LiveInterval*>* intervals, ArenaVector<LiveInterval*>::iterator pos) { + ScopedArenaVector<LiveInterval*>* intervals, ScopedArenaVector<LiveInterval*>::iterator pos) { DCHECK(intervals->begin() <= pos && pos < intervals->end()); LiveInterval* interval = *pos; if (interval->IsLowInterval()) { @@ -1044,7 +1052,8 @@ bool RegisterAllocatorLinearScan::AllocateBlockedReg(LiveInterval* current) { } } -void RegisterAllocatorLinearScan::AddSorted(ArenaVector<LiveInterval*>* array, LiveInterval* interval) { +void RegisterAllocatorLinearScan::AddSorted(ScopedArenaVector<LiveInterval*>* array, + LiveInterval* interval) { DCHECK(!interval->IsFixed() && !interval->HasSpillSlot()); size_t insert_at = 0; for (size_t i = array->size(); i > 0; --i) { @@ -1102,7 +1111,7 @@ void RegisterAllocatorLinearScan::AllocateSpillSlotFor(LiveInterval* interval) { return; } - ArenaVector<size_t>* spill_slots = nullptr; + ScopedArenaVector<size_t>* spill_slots = nullptr; switch (interval->GetType()) { case DataType::Type::kFloat64: spill_slots = 
&double_spill_slots_; diff --git a/compiler/optimizing/register_allocator_linear_scan.h b/compiler/optimizing/register_allocator_linear_scan.h index 9c650a44d2..36788b7c3c 100644 --- a/compiler/optimizing/register_allocator_linear_scan.h +++ b/compiler/optimizing/register_allocator_linear_scan.h @@ -18,7 +18,7 @@ #define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_LINEAR_SCAN_H_ #include "arch/instruction_set.h" -#include "base/arena_containers.h" +#include "base/scoped_arena_containers.h" #include "base/macros.h" #include "register_allocator.h" @@ -39,10 +39,10 @@ class SsaLivenessAnalysis; */ class RegisterAllocatorLinearScan : public RegisterAllocator { public: - RegisterAllocatorLinearScan(ArenaAllocator* allocator, + RegisterAllocatorLinearScan(ScopedArenaAllocator* allocator, CodeGenerator* codegen, const SsaLivenessAnalysis& analysis); - ~RegisterAllocatorLinearScan() OVERRIDE {} + ~RegisterAllocatorLinearScan() OVERRIDE; void AllocateRegisters() OVERRIDE; @@ -70,7 +70,7 @@ class RegisterAllocatorLinearScan : public RegisterAllocator { bool AllocateBlockedReg(LiveInterval* interval); // Add `interval` in the given sorted list. - static void AddSorted(ArenaVector<LiveInterval*>* array, LiveInterval* interval); + static void AddSorted(ScopedArenaVector<LiveInterval*>* array, LiveInterval* interval); // Returns whether `reg` is blocked by the code generator. bool IsBlocked(int reg) const; @@ -107,43 +107,43 @@ class RegisterAllocatorLinearScan : public RegisterAllocator { // List of intervals for core registers that must be processed, ordered by start // position. Last entry is the interval that has the lowest start position. // This list is initially populated before doing the linear scan. - ArenaVector<LiveInterval*> unhandled_core_intervals_; + ScopedArenaVector<LiveInterval*> unhandled_core_intervals_; // List of intervals for floating-point registers. Same comments as above. 
- ArenaVector<LiveInterval*> unhandled_fp_intervals_; + ScopedArenaVector<LiveInterval*> unhandled_fp_intervals_; // Currently processed list of unhandled intervals. Either `unhandled_core_intervals_` // or `unhandled_fp_intervals_`. - ArenaVector<LiveInterval*>* unhandled_; + ScopedArenaVector<LiveInterval*>* unhandled_; // List of intervals that have been processed. - ArenaVector<LiveInterval*> handled_; + ScopedArenaVector<LiveInterval*> handled_; // List of intervals that are currently active when processing a new live interval. // That is, they have a live range that spans the start of the new interval. - ArenaVector<LiveInterval*> active_; + ScopedArenaVector<LiveInterval*> active_; // List of intervals that are currently inactive when processing a new live interval. // That is, they have a lifetime hole that spans the start of the new interval. - ArenaVector<LiveInterval*> inactive_; + ScopedArenaVector<LiveInterval*> inactive_; // Fixed intervals for physical registers. Such intervals cover the positions // where an instruction requires a specific register. - ArenaVector<LiveInterval*> physical_core_register_intervals_; - ArenaVector<LiveInterval*> physical_fp_register_intervals_; + ScopedArenaVector<LiveInterval*> physical_core_register_intervals_; + ScopedArenaVector<LiveInterval*> physical_fp_register_intervals_; // Intervals for temporaries. Such intervals cover the positions // where an instruction requires a temporary. - ArenaVector<LiveInterval*> temp_intervals_; + ScopedArenaVector<LiveInterval*> temp_intervals_; // The spill slots allocated for live intervals. We ensure spill slots // are typed to avoid (1) doing moves and swaps between two different kinds // of registers, and (2) swapping between a single stack slot and a double // stack slot. This simplifies the parallel move resolver. 
- ArenaVector<size_t> int_spill_slots_; - ArenaVector<size_t> long_spill_slots_; - ArenaVector<size_t> float_spill_slots_; - ArenaVector<size_t> double_spill_slots_; + ScopedArenaVector<size_t> int_spill_slots_; + ScopedArenaVector<size_t> long_spill_slots_; + ScopedArenaVector<size_t> float_spill_slots_; + ScopedArenaVector<size_t> double_spill_slots_; // Spill slots allocated to catch phis. This category is special-cased because // (1) slots are allocated prior to linear scan and in reverse linear order, @@ -151,7 +151,7 @@ class RegisterAllocatorLinearScan : public RegisterAllocator { size_t catch_phi_spill_slots_; // Instructions that need a safepoint. - ArenaVector<HInstruction*> safepoints_; + ScopedArenaVector<HInstruction*> safepoints_; // True if processing core registers. False if processing floating // point registers. diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc index 59987e26b6..69ed8c7fcc 100644 --- a/compiler/optimizing/register_allocator_test.cc +++ b/compiler/optimizing/register_allocator_test.cc @@ -38,12 +38,36 @@ using Strategy = RegisterAllocator::Strategy; // Note: the register allocator tests rely on the fact that constants have live // intervals and registers get allocated to them. -class RegisterAllocatorTest : public CommonCompilerTest { +class RegisterAllocatorTest : public OptimizingUnitTest { protected: // These functions need to access private variables of LocationSummary, so we declare it // as a member of RegisterAllocatorTest, which we make a friend class. - static void SameAsFirstInputHint(Strategy strategy); - static void ExpectedInRegisterHint(Strategy strategy); + void SameAsFirstInputHint(Strategy strategy); + void ExpectedInRegisterHint(Strategy strategy); + + // Helper functions that make use of the OptimizingUnitTest's members. 
+ bool Check(const uint16_t* data, Strategy strategy); + void CFG1(Strategy strategy); + void Loop1(Strategy strategy); + void Loop2(Strategy strategy); + void Loop3(Strategy strategy); + void DeadPhi(Strategy strategy); + HGraph* BuildIfElseWithPhi(HPhi** phi, HInstruction** input1, HInstruction** input2); + void PhiHint(Strategy strategy); + HGraph* BuildFieldReturn(HInstruction** field, HInstruction** ret); + HGraph* BuildTwoSubs(HInstruction** first_sub, HInstruction** second_sub); + HGraph* BuildDiv(HInstruction** div); + void ExpectedExactInRegisterAndSameOutputHint(Strategy strategy); + + bool ValidateIntervals(const ScopedArenaVector<LiveInterval*>& intervals, + const CodeGenerator& codegen) { + return RegisterAllocator::ValidateIntervals(ArrayRef<LiveInterval* const>(intervals), + /* number_of_spill_slots */ 0u, + /* number_of_out_slots */ 0u, + codegen, + /* processing_core_registers */ true, + /* log_fatal_on_failure */ false); + } }; // This macro should include all register allocation strategies that should be tested. 
@@ -55,17 +79,15 @@ TEST_F(RegisterAllocatorTest, test_name##_GraphColor) {\ test_name(Strategy::kRegisterAllocatorGraphColor);\ } -static bool Check(const uint16_t* data, Strategy strategy) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = CreateCFG(&allocator, data); +bool RegisterAllocatorTest::Check(const uint16_t* data, Strategy strategy) { + HGraph* graph = CreateCFG(data); std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions()); - SsaLivenessAnalysis liveness(graph, &codegen); + SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator()); liveness.Analyze(); - RegisterAllocator* register_allocator = - RegisterAllocator::Create(&allocator, &codegen, liveness, strategy); + std::unique_ptr<RegisterAllocator> register_allocator = + RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy); register_allocator->AllocateRegisters(); return register_allocator->Validate(false); } @@ -75,95 +97,82 @@ static bool Check(const uint16_t* data, Strategy strategy) { * tests are based on this validation method. */ TEST_F(RegisterAllocatorTest, ValidateIntervals) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = CreateGraph(&allocator); + HGraph* graph = CreateGraph(); std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions()); - ArenaVector<LiveInterval*> intervals(allocator.Adapter()); + ScopedArenaVector<LiveInterval*> intervals(GetScopedAllocator()->Adapter()); // Test with two intervals of the same range. 
{ static constexpr size_t ranges[][2] = {{0, 42}}; - intervals.push_back(BuildInterval(ranges, arraysize(ranges), &allocator, 0)); - intervals.push_back(BuildInterval(ranges, arraysize(ranges), &allocator, 1)); - ASSERT_TRUE(RegisterAllocator::ValidateIntervals( - intervals, 0, 0, codegen, &allocator, true, false)); + intervals.push_back(BuildInterval(ranges, arraysize(ranges), GetScopedAllocator(), 0)); + intervals.push_back(BuildInterval(ranges, arraysize(ranges), GetScopedAllocator(), 1)); + ASSERT_TRUE(ValidateIntervals(intervals, codegen)); intervals[1]->SetRegister(0); - ASSERT_FALSE(RegisterAllocator::ValidateIntervals( - intervals, 0, 0, codegen, &allocator, true, false)); + ASSERT_FALSE(ValidateIntervals(intervals, codegen)); intervals.clear(); } // Test with two non-intersecting intervals. { static constexpr size_t ranges1[][2] = {{0, 42}}; - intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0)); + intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), GetScopedAllocator(), 0)); static constexpr size_t ranges2[][2] = {{42, 43}}; - intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1)); - ASSERT_TRUE(RegisterAllocator::ValidateIntervals( - intervals, 0, 0, codegen, &allocator, true, false)); + intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), GetScopedAllocator(), 1)); + ASSERT_TRUE(ValidateIntervals(intervals, codegen)); intervals[1]->SetRegister(0); - ASSERT_TRUE(RegisterAllocator::ValidateIntervals( - intervals, 0, 0, codegen, &allocator, true, false)); + ASSERT_TRUE(ValidateIntervals(intervals, codegen)); intervals.clear(); } // Test with two non-intersecting intervals, with one with a lifetime hole. 
{ static constexpr size_t ranges1[][2] = {{0, 42}, {45, 48}}; - intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0)); + intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), GetScopedAllocator(), 0)); static constexpr size_t ranges2[][2] = {{42, 43}}; - intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1)); - ASSERT_TRUE(RegisterAllocator::ValidateIntervals( - intervals, 0, 0, codegen, &allocator, true, false)); + intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), GetScopedAllocator(), 1)); + ASSERT_TRUE(ValidateIntervals(intervals, codegen)); intervals[1]->SetRegister(0); - ASSERT_TRUE(RegisterAllocator::ValidateIntervals( - intervals, 0, 0, codegen, &allocator, true, false)); + ASSERT_TRUE(ValidateIntervals(intervals, codegen)); intervals.clear(); } // Test with intersecting intervals. { static constexpr size_t ranges1[][2] = {{0, 42}, {44, 48}}; - intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0)); + intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), GetScopedAllocator(), 0)); static constexpr size_t ranges2[][2] = {{42, 47}}; - intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1)); - ASSERT_TRUE(RegisterAllocator::ValidateIntervals( - intervals, 0, 0, codegen, &allocator, true, false)); + intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), GetScopedAllocator(), 1)); + ASSERT_TRUE(ValidateIntervals(intervals, codegen)); intervals[1]->SetRegister(0); - ASSERT_FALSE(RegisterAllocator::ValidateIntervals( - intervals, 0, 0, codegen, &allocator, true, false)); + ASSERT_FALSE(ValidateIntervals(intervals, codegen)); intervals.clear(); } // Test with siblings. 
{ static constexpr size_t ranges1[][2] = {{0, 42}, {44, 48}}; - intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0)); + intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), GetScopedAllocator(), 0)); intervals[0]->SplitAt(43); static constexpr size_t ranges2[][2] = {{42, 47}}; - intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1)); - ASSERT_TRUE(RegisterAllocator::ValidateIntervals( - intervals, 0, 0, codegen, &allocator, true, false)); + intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), GetScopedAllocator(), 1)); + ASSERT_TRUE(ValidateIntervals(intervals, codegen)); intervals[1]->SetRegister(0); // Sibling of the first interval has no register allocated to it. - ASSERT_TRUE(RegisterAllocator::ValidateIntervals( - intervals, 0, 0, codegen, &allocator, true, false)); + ASSERT_TRUE(ValidateIntervals(intervals, codegen)); intervals[0]->GetNextSibling()->SetRegister(0); - ASSERT_FALSE(RegisterAllocator::ValidateIntervals( - intervals, 0, 0, codegen, &allocator, true, false)); + ASSERT_FALSE(ValidateIntervals(intervals, codegen)); } } -static void CFG1(Strategy strategy) { +void RegisterAllocatorTest::CFG1(Strategy strategy) { /* * Test the following snippet: * return 0; @@ -185,7 +194,7 @@ static void CFG1(Strategy strategy) { TEST_ALL_STRATEGIES(CFG1); -static void Loop1(Strategy strategy) { +void RegisterAllocatorTest::Loop1(Strategy strategy) { /* * Test the following snippet: * int a = 0; @@ -226,7 +235,7 @@ static void Loop1(Strategy strategy) { TEST_ALL_STRATEGIES(Loop1); -static void Loop2(Strategy strategy) { +void RegisterAllocatorTest::Loop2(Strategy strategy) { /* * Test the following snippet: * int a = 0; @@ -277,7 +286,7 @@ static void Loop2(Strategy strategy) { TEST_ALL_STRATEGIES(Loop2); -static void Loop3(Strategy strategy) { +void RegisterAllocatorTest::Loop3(Strategy strategy) { /* * Test the following snippet: * int a = 0 @@ -314,16 +323,14 @@ static void Loop3(Strategy 
strategy) { Instruction::MOVE | 1 << 12 | 0 << 8, Instruction::GOTO | 0xF900); - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = CreateCFG(&allocator, data); + HGraph* graph = CreateCFG(data); std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions()); - SsaLivenessAnalysis liveness(graph, &codegen); + SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator()); liveness.Analyze(); - RegisterAllocator* register_allocator = - RegisterAllocator::Create(&allocator, &codegen, liveness, strategy); + std::unique_ptr<RegisterAllocator> register_allocator = + RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy); register_allocator->AllocateRegisters(); ASSERT_TRUE(register_allocator->Validate(false)); @@ -351,13 +358,11 @@ TEST_F(RegisterAllocatorTest, FirstRegisterUse) { Instruction::XOR_INT_LIT8 | 1 << 8, 1 << 8 | 1, Instruction::RETURN_VOID); - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = CreateCFG(&allocator, data); + HGraph* graph = CreateCFG(data); std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions()); - SsaLivenessAnalysis liveness(graph, &codegen); + SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator()); liveness.Analyze(); HXor* first_xor = graph->GetBlocks()[1]->GetFirstInstruction()->AsXor(); @@ -383,7 +388,7 @@ TEST_F(RegisterAllocatorTest, FirstRegisterUse) { ASSERT_EQ(new_interval->FirstRegisterUse(), last_xor->GetLifetimePosition()); } -static void DeadPhi(Strategy strategy) { +void RegisterAllocatorTest::DeadPhi(Strategy strategy) { /* Test for a dead loop phi taking as back-edge input a phi that also has * this loop phi as input. 
Walking backwards in SsaDeadPhiElimination * does not solve the problem because the loop phi will be visited last. @@ -405,17 +410,15 @@ static void DeadPhi(Strategy strategy) { Instruction::GOTO | 0xFD00, Instruction::RETURN_VOID); - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = CreateCFG(&allocator, data); + HGraph* graph = CreateCFG(data); SsaDeadPhiElimination(graph).Run(); std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions()); - SsaLivenessAnalysis liveness(graph, &codegen); + SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator()); liveness.Analyze(); - RegisterAllocator* register_allocator = - RegisterAllocator::Create(&allocator, &codegen, liveness, strategy); + std::unique_ptr<RegisterAllocator> register_allocator = + RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy); register_allocator->AllocateRegisters(); ASSERT_TRUE(register_allocator->Validate(false)); } @@ -433,16 +436,14 @@ TEST_F(RegisterAllocatorTest, FreeUntil) { Instruction::CONST_4 | 0 | 0, Instruction::RETURN); - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = CreateCFG(&allocator, data); + HGraph* graph = CreateCFG(data); SsaDeadPhiElimination(graph).Run(); std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions()); - SsaLivenessAnalysis liveness(graph, &codegen); + SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator()); liveness.Analyze(); - RegisterAllocatorLinearScan register_allocator(&allocator, &codegen, liveness); + RegisterAllocatorLinearScan register_allocator(GetScopedAllocator(), &codegen, liveness); // Add an artifical range to cover the temps that will be put in the unhandled list. 
LiveInterval* unhandled = graph->GetEntryBlock()->GetFirstInstruction()->GetLiveInterval(); @@ -461,20 +462,21 @@ TEST_F(RegisterAllocatorTest, FreeUntil) { // Add three temps holding the same register, and starting at different positions. // Put the one that should be picked in the middle of the inactive list to ensure // we do not depend on an order. - LiveInterval* interval = LiveInterval::MakeFixedInterval(&allocator, 0, DataType::Type::kInt32); + LiveInterval* interval = + LiveInterval::MakeFixedInterval(GetScopedAllocator(), 0, DataType::Type::kInt32); interval->AddRange(40, 50); register_allocator.inactive_.push_back(interval); - interval = LiveInterval::MakeFixedInterval(&allocator, 0, DataType::Type::kInt32); + interval = LiveInterval::MakeFixedInterval(GetScopedAllocator(), 0, DataType::Type::kInt32); interval->AddRange(20, 30); register_allocator.inactive_.push_back(interval); - interval = LiveInterval::MakeFixedInterval(&allocator, 0, DataType::Type::kInt32); + interval = LiveInterval::MakeFixedInterval(GetScopedAllocator(), 0, DataType::Type::kInt32); interval->AddRange(60, 70); register_allocator.inactive_.push_back(interval); register_allocator.number_of_registers_ = 1; - register_allocator.registers_array_ = allocator.AllocArray<size_t>(1); + register_allocator.registers_array_ = GetAllocator()->AllocArray<size_t>(1); register_allocator.processing_core_registers_ = true; register_allocator.unhandled_ = ®ister_allocator.unhandled_core_intervals_; @@ -487,36 +489,35 @@ TEST_F(RegisterAllocatorTest, FreeUntil) { ASSERT_EQ(20u, register_allocator.unhandled_->front()->GetStart()); } -static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator, - HPhi** phi, - HInstruction** input1, - HInstruction** input2) { - HGraph* graph = CreateGraph(allocator); - HBasicBlock* entry = new (allocator) HBasicBlock(graph); +HGraph* RegisterAllocatorTest::BuildIfElseWithPhi(HPhi** phi, + HInstruction** input1, + HInstruction** input2) { + HGraph* graph = CreateGraph(); + 
HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(entry); graph->SetEntryBlock(entry); - HInstruction* parameter = new (allocator) HParameterValue( + HInstruction* parameter = new (GetAllocator()) HParameterValue( graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); entry->AddInstruction(parameter); - HBasicBlock* block = new (allocator) HBasicBlock(graph); + HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(block); entry->AddSuccessor(block); - HInstruction* test = new (allocator) HInstanceFieldGet(parameter, - nullptr, - DataType::Type::kBool, - MemberOffset(22), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph->GetDexFile(), - 0); + HInstruction* test = new (GetAllocator()) HInstanceFieldGet(parameter, + nullptr, + DataType::Type::kBool, + MemberOffset(22), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph->GetDexFile(), + 0); block->AddInstruction(test); - block->AddInstruction(new (allocator) HIf(test)); - HBasicBlock* then = new (allocator) HBasicBlock(graph); - HBasicBlock* else_ = new (allocator) HBasicBlock(graph); - HBasicBlock* join = new (allocator) HBasicBlock(graph); + block->AddInstruction(new (GetAllocator()) HIf(test)); + HBasicBlock* then = new (GetAllocator()) HBasicBlock(graph); + HBasicBlock* else_ = new (GetAllocator()) HBasicBlock(graph); + HBasicBlock* join = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(then); graph->AddBlock(else_); graph->AddBlock(join); @@ -525,32 +526,32 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator, block->AddSuccessor(else_); then->AddSuccessor(join); else_->AddSuccessor(join); - then->AddInstruction(new (allocator) HGoto()); - else_->AddInstruction(new (allocator) HGoto()); + then->AddInstruction(new (GetAllocator()) HGoto()); + else_->AddInstruction(new (GetAllocator()) HGoto()); - *phi = new (allocator) HPhi(allocator, 0, 0, DataType::Type::kInt32); + *phi = new (GetAllocator()) 
HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32); join->AddPhi(*phi); - *input1 = new (allocator) HInstanceFieldGet(parameter, - nullptr, - DataType::Type::kInt32, - MemberOffset(42), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph->GetDexFile(), - 0); - *input2 = new (allocator) HInstanceFieldGet(parameter, - nullptr, - DataType::Type::kInt32, - MemberOffset(42), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph->GetDexFile(), - 0); + *input1 = new (GetAllocator()) HInstanceFieldGet(parameter, + nullptr, + DataType::Type::kInt32, + MemberOffset(42), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph->GetDexFile(), + 0); + *input2 = new (GetAllocator()) HInstanceFieldGet(parameter, + nullptr, + DataType::Type::kInt32, + MemberOffset(42), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph->GetDexFile(), + 0); then->AddInstruction(*input1); else_->AddInstruction(*input2); - join->AddInstruction(new (allocator) HExit()); + join->AddInstruction(new (GetAllocator()) HExit()); (*phi)->AddInput(*input1); (*phi)->AddInput(*input2); @@ -559,23 +560,21 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator, return graph; } -static void PhiHint(Strategy strategy) { - ArenaPool pool; - ArenaAllocator allocator(&pool); +void RegisterAllocatorTest::PhiHint(Strategy strategy) { HPhi *phi; HInstruction *input1, *input2; { - HGraph* graph = BuildIfElseWithPhi(&allocator, &phi, &input1, &input2); + HGraph* graph = BuildIfElseWithPhi(&phi, &input1, &input2); std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions()); - SsaLivenessAnalysis liveness(graph, &codegen); + SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator()); liveness.Analyze(); // Check that the register allocator is deterministic. 
- RegisterAllocator* register_allocator = - RegisterAllocator::Create(&allocator, &codegen, liveness, strategy); + std::unique_ptr<RegisterAllocator> register_allocator = + RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy); register_allocator->AllocateRegisters(); ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 0); @@ -584,18 +583,18 @@ static void PhiHint(Strategy strategy) { } { - HGraph* graph = BuildIfElseWithPhi(&allocator, &phi, &input1, &input2); + HGraph* graph = BuildIfElseWithPhi(&phi, &input1, &input2); std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions()); - SsaLivenessAnalysis liveness(graph, &codegen); + SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator()); liveness.Analyze(); // Set the phi to a specific register, and check that the inputs get allocated // the same register. phi->GetLocations()->UpdateOut(Location::RegisterLocation(2)); - RegisterAllocator* register_allocator = - RegisterAllocator::Create(&allocator, &codegen, liveness, strategy); + std::unique_ptr<RegisterAllocator> register_allocator = + RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy); register_allocator->AllocateRegisters(); ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 2); @@ -604,18 +603,18 @@ static void PhiHint(Strategy strategy) { } { - HGraph* graph = BuildIfElseWithPhi(&allocator, &phi, &input1, &input2); + HGraph* graph = BuildIfElseWithPhi(&phi, &input1, &input2); std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions()); - SsaLivenessAnalysis liveness(graph, &codegen); + SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator()); liveness.Analyze(); // Set input1 to a specific register, and check that the phi and other input 
get allocated // the same register. input1->GetLocations()->UpdateOut(Location::RegisterLocation(2)); - RegisterAllocator* register_allocator = - RegisterAllocator::Create(&allocator, &codegen, liveness, strategy); + std::unique_ptr<RegisterAllocator> register_allocator = + RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy); register_allocator->AllocateRegisters(); ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 2); @@ -624,18 +623,18 @@ static void PhiHint(Strategy strategy) { } { - HGraph* graph = BuildIfElseWithPhi(&allocator, &phi, &input1, &input2); + HGraph* graph = BuildIfElseWithPhi(&phi, &input1, &input2); std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions()); - SsaLivenessAnalysis liveness(graph, &codegen); + SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator()); liveness.Analyze(); // Set input2 to a specific register, and check that the phi and other input get allocated // the same register. 
input2->GetLocations()->UpdateOut(Location::RegisterLocation(2)); - RegisterAllocator* register_allocator = - RegisterAllocator::Create(&allocator, &codegen, liveness, strategy); + std::unique_ptr<RegisterAllocator> register_allocator = + RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy); register_allocator->AllocateRegisters(); ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 2); @@ -650,58 +649,54 @@ TEST_F(RegisterAllocatorTest, PhiHint_LinearScan) { PhiHint(Strategy::kRegisterAllocatorLinearScan); } -static HGraph* BuildFieldReturn(ArenaAllocator* allocator, - HInstruction** field, - HInstruction** ret) { - HGraph* graph = CreateGraph(allocator); - HBasicBlock* entry = new (allocator) HBasicBlock(graph); +HGraph* RegisterAllocatorTest::BuildFieldReturn(HInstruction** field, HInstruction** ret) { + HGraph* graph = CreateGraph(); + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(entry); graph->SetEntryBlock(entry); - HInstruction* parameter = new (allocator) HParameterValue( + HInstruction* parameter = new (GetAllocator()) HParameterValue( graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); entry->AddInstruction(parameter); - HBasicBlock* block = new (allocator) HBasicBlock(graph); + HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(block); entry->AddSuccessor(block); - *field = new (allocator) HInstanceFieldGet(parameter, - nullptr, - DataType::Type::kInt32, - MemberOffset(42), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph->GetDexFile(), - 0); + *field = new (GetAllocator()) HInstanceFieldGet(parameter, + nullptr, + DataType::Type::kInt32, + MemberOffset(42), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph->GetDexFile(), + 0); block->AddInstruction(*field); - *ret = new (allocator) HReturn(*field); + *ret = new (GetAllocator()) HReturn(*field); block->AddInstruction(*ret); - HBasicBlock* exit = new (allocator) 
HBasicBlock(graph); + HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(exit); block->AddSuccessor(exit); - exit->AddInstruction(new (allocator) HExit()); + exit->AddInstruction(new (GetAllocator()) HExit()); graph->BuildDominatorTree(); return graph; } void RegisterAllocatorTest::ExpectedInRegisterHint(Strategy strategy) { - ArenaPool pool; - ArenaAllocator allocator(&pool); HInstruction *field, *ret; { - HGraph* graph = BuildFieldReturn(&allocator, &field, &ret); + HGraph* graph = BuildFieldReturn(&field, &ret); std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions()); - SsaLivenessAnalysis liveness(graph, &codegen); + SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator()); liveness.Analyze(); - RegisterAllocator* register_allocator = - RegisterAllocator::Create(&allocator, &codegen, liveness, strategy); + std::unique_ptr<RegisterAllocator> register_allocator = + RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy); register_allocator->AllocateRegisters(); // Sanity check that in normal conditions, the register should be hinted to 0 (EAX). @@ -709,19 +704,19 @@ void RegisterAllocatorTest::ExpectedInRegisterHint(Strategy strategy) { } { - HGraph* graph = BuildFieldReturn(&allocator, &field, &ret); + HGraph* graph = BuildFieldReturn(&field, &ret); std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions()); - SsaLivenessAnalysis liveness(graph, &codegen); + SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator()); liveness.Analyze(); // Check that the field gets put in the register expected by its use. // Don't use SetInAt because we are overriding an already allocated location. 
ret->GetLocations()->inputs_[0] = Location::RegisterLocation(2); - RegisterAllocator* register_allocator = - RegisterAllocator::Create(&allocator, &codegen, liveness, strategy); + std::unique_ptr<RegisterAllocator> register_allocator = + RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy); register_allocator->AllocateRegisters(); ASSERT_EQ(field->GetLiveInterval()->GetRegister(), 2); @@ -734,50 +729,46 @@ TEST_F(RegisterAllocatorTest, ExpectedInRegisterHint_LinearScan) { ExpectedInRegisterHint(Strategy::kRegisterAllocatorLinearScan); } -static HGraph* BuildTwoSubs(ArenaAllocator* allocator, - HInstruction** first_sub, - HInstruction** second_sub) { - HGraph* graph = CreateGraph(allocator); - HBasicBlock* entry = new (allocator) HBasicBlock(graph); +HGraph* RegisterAllocatorTest::BuildTwoSubs(HInstruction** first_sub, HInstruction** second_sub) { + HGraph* graph = CreateGraph(); + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(entry); graph->SetEntryBlock(entry); - HInstruction* parameter = new (allocator) HParameterValue( + HInstruction* parameter = new (GetAllocator()) HParameterValue( graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); entry->AddInstruction(parameter); HInstruction* constant1 = graph->GetIntConstant(1); HInstruction* constant2 = graph->GetIntConstant(2); - HBasicBlock* block = new (allocator) HBasicBlock(graph); + HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(block); entry->AddSuccessor(block); - *first_sub = new (allocator) HSub(DataType::Type::kInt32, parameter, constant1); + *first_sub = new (GetAllocator()) HSub(DataType::Type::kInt32, parameter, constant1); block->AddInstruction(*first_sub); - *second_sub = new (allocator) HSub(DataType::Type::kInt32, *first_sub, constant2); + *second_sub = new (GetAllocator()) HSub(DataType::Type::kInt32, *first_sub, constant2); block->AddInstruction(*second_sub); - block->AddInstruction(new (allocator) 
HExit()); + block->AddInstruction(new (GetAllocator()) HExit()); graph->BuildDominatorTree(); return graph; } void RegisterAllocatorTest::SameAsFirstInputHint(Strategy strategy) { - ArenaPool pool; - ArenaAllocator allocator(&pool); HInstruction *first_sub, *second_sub; { - HGraph* graph = BuildTwoSubs(&allocator, &first_sub, &second_sub); + HGraph* graph = BuildTwoSubs(&first_sub, &second_sub); std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions()); - SsaLivenessAnalysis liveness(graph, &codegen); + SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator()); liveness.Analyze(); - RegisterAllocator* register_allocator = - RegisterAllocator::Create(&allocator, &codegen, liveness, strategy); + std::unique_ptr<RegisterAllocator> register_allocator = + RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy); register_allocator->AllocateRegisters(); // Sanity check that in normal conditions, the registers are the same. @@ -786,11 +777,11 @@ void RegisterAllocatorTest::SameAsFirstInputHint(Strategy strategy) { } { - HGraph* graph = BuildTwoSubs(&allocator, &first_sub, &second_sub); + HGraph* graph = BuildTwoSubs(&first_sub, &second_sub); std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions()); - SsaLivenessAnalysis liveness(graph, &codegen); + SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator()); liveness.Analyze(); // check that both adds get the same register. 
@@ -799,8 +790,8 @@ void RegisterAllocatorTest::SameAsFirstInputHint(Strategy strategy) { ASSERT_EQ(first_sub->GetLocations()->Out().GetPolicy(), Location::kSameAsFirstInput); ASSERT_EQ(second_sub->GetLocations()->Out().GetPolicy(), Location::kSameAsFirstInput); - RegisterAllocator* register_allocator = - RegisterAllocator::Create(&allocator, &codegen, liveness, strategy); + std::unique_ptr<RegisterAllocator> register_allocator = + RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy); register_allocator->AllocateRegisters(); ASSERT_EQ(first_sub->GetLiveInterval()->GetRegister(), 2); @@ -814,53 +805,47 @@ TEST_F(RegisterAllocatorTest, SameAsFirstInputHint_LinearScan) { SameAsFirstInputHint(Strategy::kRegisterAllocatorLinearScan); } -static HGraph* BuildDiv(ArenaAllocator* allocator, - HInstruction** div) { - HGraph* graph = CreateGraph(allocator); - HBasicBlock* entry = new (allocator) HBasicBlock(graph); +HGraph* RegisterAllocatorTest::BuildDiv(HInstruction** div) { + HGraph* graph = CreateGraph(); + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(entry); graph->SetEntryBlock(entry); - HInstruction* first = new (allocator) HParameterValue( + HInstruction* first = new (GetAllocator()) HParameterValue( graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); - HInstruction* second = new (allocator) HParameterValue( + HInstruction* second = new (GetAllocator()) HParameterValue( graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); entry->AddInstruction(first); entry->AddInstruction(second); - HBasicBlock* block = new (allocator) HBasicBlock(graph); + HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(block); entry->AddSuccessor(block); - *div = - new (allocator) HDiv(DataType::Type::kInt32, first, second, 0); // don't care about dex_pc. + *div = new (GetAllocator()) HDiv( + DataType::Type::kInt32, first, second, 0); // don't care about dex_pc. 
block->AddInstruction(*div); - block->AddInstruction(new (allocator) HExit()); + block->AddInstruction(new (GetAllocator()) HExit()); graph->BuildDominatorTree(); return graph; } -static void ExpectedExactInRegisterAndSameOutputHint(Strategy strategy) { - ArenaPool pool; - ArenaAllocator allocator(&pool); +void RegisterAllocatorTest::ExpectedExactInRegisterAndSameOutputHint(Strategy strategy) { HInstruction *div; + HGraph* graph = BuildDiv(&div); + std::unique_ptr<const X86InstructionSetFeatures> features_x86( + X86InstructionSetFeatures::FromCppDefines()); + x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions()); + SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator()); + liveness.Analyze(); - { - HGraph* graph = BuildDiv(&allocator, &div); - std::unique_ptr<const X86InstructionSetFeatures> features_x86( - X86InstructionSetFeatures::FromCppDefines()); - x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions()); - SsaLivenessAnalysis liveness(graph, &codegen); - liveness.Analyze(); - - RegisterAllocator* register_allocator = - RegisterAllocator::Create(&allocator, &codegen, liveness, strategy); - register_allocator->AllocateRegisters(); + std::unique_ptr<RegisterAllocator> register_allocator = + RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy); + register_allocator->AllocateRegisters(); - // div on x86 requires its first input in eax and the output be the same as the first input. - ASSERT_EQ(div->GetLiveInterval()->GetRegister(), 0); - } + // div on x86 requires its first input in eax and the output be the same as the first input. + ASSERT_EQ(div->GetLiveInterval()->GetRegister(), 0); } // TODO: Enable this test for graph coloring register allocation when iterative move @@ -874,59 +859,57 @@ TEST_F(RegisterAllocatorTest, ExpectedExactInRegisterAndSameOutputHint_LinearSca // position. // This test only applies to the linear scan allocator. 
TEST_F(RegisterAllocatorTest, SpillInactive) { - ArenaPool pool; - // Create a synthesized graph to please the register_allocator and // ssa_liveness_analysis code. - ArenaAllocator allocator(&pool); - HGraph* graph = CreateGraph(&allocator); - HBasicBlock* entry = new (&allocator) HBasicBlock(graph); + HGraph* graph = CreateGraph(); + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(entry); graph->SetEntryBlock(entry); - HInstruction* one = new (&allocator) HParameterValue( + HInstruction* one = new (GetAllocator()) HParameterValue( graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); - HInstruction* two = new (&allocator) HParameterValue( + HInstruction* two = new (GetAllocator()) HParameterValue( graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); - HInstruction* three = new (&allocator) HParameterValue( + HInstruction* three = new (GetAllocator()) HParameterValue( graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); - HInstruction* four = new (&allocator) HParameterValue( + HInstruction* four = new (GetAllocator()) HParameterValue( graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); entry->AddInstruction(one); entry->AddInstruction(two); entry->AddInstruction(three); entry->AddInstruction(four); - HBasicBlock* block = new (&allocator) HBasicBlock(graph); + HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(block); entry->AddSuccessor(block); - block->AddInstruction(new (&allocator) HExit()); + block->AddInstruction(new (GetAllocator()) HExit()); // We create a synthesized user requesting a register, to avoid just spilling the // intervals. 
- HPhi* user = new (&allocator) HPhi(&allocator, 0, 1, DataType::Type::kInt32); + HPhi* user = new (GetAllocator()) HPhi(GetAllocator(), 0, 1, DataType::Type::kInt32); user->AddInput(one); user->SetBlock(block); - LocationSummary* locations = new (&allocator) LocationSummary(user, LocationSummary::kNoCall); + LocationSummary* locations = new (GetAllocator()) LocationSummary(user, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); static constexpr size_t phi_ranges[][2] = {{20, 30}}; - BuildInterval(phi_ranges, arraysize(phi_ranges), &allocator, -1, user); + BuildInterval(phi_ranges, arraysize(phi_ranges), GetScopedAllocator(), -1, user); // Create an interval with lifetime holes. static constexpr size_t ranges1[][2] = {{0, 2}, {4, 6}, {8, 10}}; - LiveInterval* first = BuildInterval(ranges1, arraysize(ranges1), &allocator, -1, one); - first->uses_.push_front(*new(&allocator) UsePosition(user, false, 8)); - first->uses_.push_front(*new(&allocator) UsePosition(user, false, 7)); - first->uses_.push_front(*new(&allocator) UsePosition(user, false, 6)); + LiveInterval* first = BuildInterval(ranges1, arraysize(ranges1), GetScopedAllocator(), -1, one); + first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 8)); + first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 7)); + first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 6)); - locations = new (&allocator) LocationSummary(first->GetDefinedBy(), LocationSummary::kNoCall); + locations = new (GetAllocator()) LocationSummary(first->GetDefinedBy(), LocationSummary::kNoCall); locations->SetOut(Location::RequiresRegister()); first = first->SplitAt(1); // Create an interval that conflicts with the next interval, to force the next // interval to call `AllocateBlockedReg`. 
static constexpr size_t ranges2[][2] = {{2, 4}}; - LiveInterval* second = BuildInterval(ranges2, arraysize(ranges2), &allocator, -1, two); - locations = new (&allocator) LocationSummary(second->GetDefinedBy(), LocationSummary::kNoCall); + LiveInterval* second = BuildInterval(ranges2, arraysize(ranges2), GetScopedAllocator(), -1, two); + locations = + new (GetAllocator()) LocationSummary(second->GetDefinedBy(), LocationSummary::kNoCall); locations->SetOut(Location::RequiresRegister()); // Create an interval that will lead to splitting the first interval. The bug occurred @@ -935,31 +918,32 @@ TEST_F(RegisterAllocatorTest, SpillInactive) { // "[0, 2(, [4, 6(" in the list of handled intervals, even though we haven't processed intervals // before lifetime position 6 yet. static constexpr size_t ranges3[][2] = {{2, 4}, {8, 10}}; - LiveInterval* third = BuildInterval(ranges3, arraysize(ranges3), &allocator, -1, three); - third->uses_.push_front(*new(&allocator) UsePosition(user, false, 8)); - third->uses_.push_front(*new(&allocator) UsePosition(user, false, 4)); - third->uses_.push_front(*new(&allocator) UsePosition(user, false, 3)); - locations = new (&allocator) LocationSummary(third->GetDefinedBy(), LocationSummary::kNoCall); + LiveInterval* third = BuildInterval(ranges3, arraysize(ranges3), GetScopedAllocator(), -1, three); + third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 8)); + third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 4)); + third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 3)); + locations = new (GetAllocator()) LocationSummary(third->GetDefinedBy(), LocationSummary::kNoCall); locations->SetOut(Location::RequiresRegister()); third = third->SplitAt(3); // Because the first part of the split interval was considered handled, this interval // was free to allocate the same register, even though it conflicts with it.
static constexpr size_t ranges4[][2] = {{4, 6}}; - LiveInterval* fourth = BuildInterval(ranges4, arraysize(ranges4), &allocator, -1, four); - locations = new (&allocator) LocationSummary(fourth->GetDefinedBy(), LocationSummary::kNoCall); + LiveInterval* fourth = BuildInterval(ranges4, arraysize(ranges4), GetScopedAllocator(), -1, four); + locations = + new (GetAllocator()) LocationSummary(fourth->GetDefinedBy(), LocationSummary::kNoCall); locations->SetOut(Location::RequiresRegister()); std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions()); - SsaLivenessAnalysis liveness(graph, &codegen); + SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator()); // Populate the instructions in the liveness object, to please the register allocator. for (size_t i = 0; i < 32; ++i) { liveness.instructions_from_lifetime_position_.push_back(user); } - RegisterAllocatorLinearScan register_allocator(&allocator, &codegen, liveness); + RegisterAllocatorLinearScan register_allocator(GetScopedAllocator(), &codegen, liveness); register_allocator.unhandled_core_intervals_.push_back(fourth); register_allocator.unhandled_core_intervals_.push_back(third); register_allocator.unhandled_core_intervals_.push_back(second); @@ -967,19 +951,18 @@ TEST_F(RegisterAllocatorTest, SpillInactive) { // Set just one register available to make all intervals compete for the same. register_allocator.number_of_registers_ = 1; - register_allocator.registers_array_ = allocator.AllocArray<size_t>(1); + register_allocator.registers_array_ = GetAllocator()->AllocArray<size_t>(1); register_allocator.processing_core_registers_ = true; register_allocator.unhandled_ = ®ister_allocator.unhandled_core_intervals_; register_allocator.LinearScan(); // Test that there is no conflicts between intervals. 
- ArenaVector<LiveInterval*> intervals(allocator.Adapter()); + ScopedArenaVector<LiveInterval*> intervals(GetScopedAllocator()->Adapter()); intervals.push_back(first); intervals.push_back(second); intervals.push_back(third); intervals.push_back(fourth); - ASSERT_TRUE(RegisterAllocator::ValidateIntervals( - intervals, 0, 0, codegen, &allocator, true, false)); + ASSERT_TRUE(ValidateIntervals(intervals, codegen)); } } // namespace art diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc index 5212e866cf..c673d54458 100644 --- a/compiler/optimizing/scheduler.cc +++ b/compiler/optimizing/scheduler.cc @@ -18,6 +18,8 @@ #include "scheduler.h" +#include "base/scoped_arena_allocator.h" +#include "base/scoped_arena_containers.h" #include "data_type-inl.h" #include "prepare_for_register_allocation.h" @@ -442,7 +444,7 @@ static void DumpAsDotNode(std::ostream& output, const SchedulingNode* node) { } void SchedulingGraph::DumpAsDotGraph(const std::string& description, - const ArenaVector<SchedulingNode*>& initial_candidates) { + const ScopedArenaVector<SchedulingNode*>& initial_candidates) { // TODO(xueliang): ideally we should move scheduling information into HInstruction, after that // we should move this dotty graph dump feature to visualizer, and have a compiler option for it. std::ofstream output("scheduling_graphs.dot", std::ofstream::out | std::ofstream::app); @@ -451,7 +453,7 @@ void SchedulingGraph::DumpAsDotGraph(const std::string& description, // Start the dot graph. Use an increasing index for easier differentiation. output << "digraph G {\n"; for (const auto& entry : nodes_map_) { - SchedulingNode* node = entry.second; + SchedulingNode* node = entry.second.get(); DumpAsDotNode(output, node); } // Create a fake 'end_of_scheduling' node to help visualization of critical_paths. 
@@ -466,7 +468,7 @@ void SchedulingGraph::DumpAsDotGraph(const std::string& description, } SchedulingNode* CriticalPathSchedulingNodeSelector::SelectMaterializedCondition( - ArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) const { + ScopedArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) const { // Schedule condition inputs that can be materialized immediately before their use. // In following example, after we've scheduled HSelect, we want LessThan to be scheduled // immediately, because it is a materialized condition, and will be emitted right before HSelect @@ -506,7 +508,7 @@ SchedulingNode* CriticalPathSchedulingNodeSelector::SelectMaterializedCondition( } SchedulingNode* CriticalPathSchedulingNodeSelector::PopHighestPriorityNode( - ArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) { + ScopedArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) { DCHECK(!nodes->empty()); SchedulingNode* select_node = nullptr; @@ -562,7 +564,7 @@ void HScheduler::Schedule(HGraph* graph) { } void HScheduler::Schedule(HBasicBlock* block) { - ArenaVector<SchedulingNode*> scheduling_nodes(arena_->Adapter(kArenaAllocScheduler)); + ScopedArenaVector<SchedulingNode*> scheduling_nodes(allocator_->Adapter(kArenaAllocScheduler)); // Build the scheduling graph. scheduling_graph_.Clear(); @@ -593,7 +595,7 @@ void HScheduler::Schedule(HBasicBlock* block) { } } - ArenaVector<SchedulingNode*> initial_candidates(arena_->Adapter(kArenaAllocScheduler)); + ScopedArenaVector<SchedulingNode*> initial_candidates(allocator_->Adapter(kArenaAllocScheduler)); if (kDumpDotSchedulingGraphs) { // Remember the list of initial candidates for debug output purposes. 
initial_candidates.assign(candidates_.begin(), candidates_.end()); @@ -779,7 +781,7 @@ void HInstructionScheduling::Run(bool only_optimize_loop_blocks, #if defined(ART_ENABLE_CODEGEN_arm64) || defined(ART_ENABLE_CODEGEN_arm) // Phase-local allocator that allocates scheduler internal data structures like // scheduling nodes, internal nodes map, dependencies, etc. - ArenaAllocator arena_allocator(graph_->GetArena()->GetArenaPool()); + ScopedArenaAllocator arena_allocator(graph_->GetArenaStack()); CriticalPathSchedulingNodeSelector critical_path_selector; RandomSchedulingNodeSelector random_selector; SchedulingNodeSelector* selector = schedule_randomly diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h index 66ffac5b7d..3efd26af9b 100644 --- a/compiler/optimizing/scheduler.h +++ b/compiler/optimizing/scheduler.h @@ -19,6 +19,8 @@ #include <fstream> +#include "base/scoped_arena_allocator.h" +#include "base/scoped_arena_containers.h" #include "base/time_utils.h" #include "code_generator.h" #include "driver/compiler_driver.h" @@ -152,16 +154,16 @@ class HScheduler; /** * A node representing an `HInstruction` in the `SchedulingGraph`.
*/ -class SchedulingNode : public ArenaObject<kArenaAllocScheduler> { +class SchedulingNode : public DeletableArenaObject<kArenaAllocScheduler> { public: - SchedulingNode(HInstruction* instr, ArenaAllocator* arena, bool is_scheduling_barrier) + SchedulingNode(HInstruction* instr, ScopedArenaAllocator* allocator, bool is_scheduling_barrier) : latency_(0), internal_latency_(0), critical_path_(0), instruction_(instr), is_scheduling_barrier_(is_scheduling_barrier), - data_predecessors_(arena->Adapter(kArenaAllocScheduler)), - other_predecessors_(arena->Adapter(kArenaAllocScheduler)), + data_predecessors_(allocator->Adapter(kArenaAllocScheduler)), + other_predecessors_(allocator->Adapter(kArenaAllocScheduler)), num_unscheduled_successors_(0) { data_predecessors_.reserve(kPreallocatedPredecessors); } @@ -171,11 +173,19 @@ class SchedulingNode : public ArenaObject<kArenaAllocScheduler> { predecessor->num_unscheduled_successors_++; } + const ScopedArenaVector<SchedulingNode*>& GetDataPredecessors() const { + return data_predecessors_; + } + void AddOtherPredecessor(SchedulingNode* predecessor) { other_predecessors_.push_back(predecessor); predecessor->num_unscheduled_successors_++; } + const ScopedArenaVector<SchedulingNode*>& GetOtherPredecessors() const { + return other_predecessors_; + } + void DecrementNumberOfUnscheduledSuccessors() { num_unscheduled_successors_--; } @@ -195,8 +205,6 @@ class SchedulingNode : public ArenaObject<kArenaAllocScheduler> { void SetInternalLatency(uint32_t internal_latency) { internal_latency_ = internal_latency; } uint32_t GetCriticalPath() const { return critical_path_; } bool IsSchedulingBarrier() const { return is_scheduling_barrier_; } - const ArenaVector<SchedulingNode*>& GetDataPredecessors() const { return data_predecessors_; } - const ArenaVector<SchedulingNode*>& GetOtherPredecessors() const { return other_predecessors_; } private: // The latency of this node. 
It represents the latency between the moment the @@ -227,8 +235,8 @@ class SchedulingNode : public ArenaObject<kArenaAllocScheduler> { // Predecessors in `data_predecessors_` are data dependencies. Those in // `other_predecessors_` contain side-effect dependencies, environment // dependencies, and scheduling barrier dependencies. - ArenaVector<SchedulingNode*> data_predecessors_; - ArenaVector<SchedulingNode*> other_predecessors_; + ScopedArenaVector<SchedulingNode*> data_predecessors_; + ScopedArenaVector<SchedulingNode*> other_predecessors_; // The number of unscheduled successors for this node. This number is // decremented as successors are scheduled. When it reaches zero this node @@ -243,19 +251,21 @@ class SchedulingNode : public ArenaObject<kArenaAllocScheduler> { */ class SchedulingGraph : public ValueObject { public: - SchedulingGraph(const HScheduler* scheduler, ArenaAllocator* arena) + SchedulingGraph(const HScheduler* scheduler, ScopedArenaAllocator* allocator) : scheduler_(scheduler), - arena_(arena), + arena_(allocator), contains_scheduling_barrier_(false), nodes_map_(arena_->Adapter(kArenaAllocScheduler)), heap_location_collector_(nullptr) {} SchedulingNode* AddNode(HInstruction* instr, bool is_scheduling_barrier = false) { - SchedulingNode* node = new (arena_) SchedulingNode(instr, arena_, is_scheduling_barrier); - nodes_map_.Insert(std::make_pair(instr, node)); + std::unique_ptr<SchedulingNode> node( + new (arena_) SchedulingNode(instr, arena_, is_scheduling_barrier)); + SchedulingNode* result = node.get(); + nodes_map_.Insert(std::make_pair(instr, std::move(node))); contains_scheduling_barrier_ |= is_scheduling_barrier; AddDependencies(instr, is_scheduling_barrier); - return node; + return result; } void Clear() { @@ -272,7 +282,7 @@ class SchedulingGraph : public ValueObject { if (it == nodes_map_.end()) { return nullptr; } else { - return it->second; + return it->second.get(); } } @@ -290,7 +300,7 @@ class SchedulingGraph : public ValueObject { 
// Dump the scheduling graph, in dot file format, appending it to the file // `scheduling_graphs.dot`. void DumpAsDotGraph(const std::string& description, - const ArenaVector<SchedulingNode*>& initial_candidates); + const ScopedArenaVector<SchedulingNode*>& initial_candidates); protected: void AddDependency(SchedulingNode* node, SchedulingNode* dependency, bool is_data_dependency); @@ -313,11 +323,11 @@ class SchedulingGraph : public ValueObject { const HScheduler* const scheduler_; - ArenaAllocator* const arena_; + ScopedArenaAllocator* const arena_; bool contains_scheduling_barrier_; - ArenaHashMap<const HInstruction*, SchedulingNode*> nodes_map_; + ScopedArenaHashMap<const HInstruction*, std::unique_ptr<SchedulingNode>> nodes_map_; const HeapLocationCollector* heap_location_collector_; }; @@ -367,11 +377,11 @@ class SchedulingLatencyVisitor : public HGraphDelegateVisitor { class SchedulingNodeSelector : public ArenaObject<kArenaAllocScheduler> { public: - virtual SchedulingNode* PopHighestPriorityNode(ArenaVector<SchedulingNode*>* nodes, + virtual SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) = 0; virtual ~SchedulingNodeSelector() {} protected: - static void DeleteNodeAtIndex(ArenaVector<SchedulingNode*>* nodes, size_t index) { + static void DeleteNodeAtIndex(ScopedArenaVector<SchedulingNode*>* nodes, size_t index) { (*nodes)[index] = nodes->back(); nodes->pop_back(); } @@ -387,7 +397,7 @@ class RandomSchedulingNodeSelector : public SchedulingNodeSelector { srand(seed_); } - SchedulingNode* PopHighestPriorityNode(ArenaVector<SchedulingNode*>* nodes, + SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) OVERRIDE { UNUSED(graph); DCHECK(!nodes->empty()); @@ -408,15 +418,15 @@ class CriticalPathSchedulingNodeSelector : public SchedulingNodeSelector { public: CriticalPathSchedulingNodeSelector() : prev_select_(nullptr) {} - SchedulingNode* 
PopHighestPriorityNode(ArenaVector<SchedulingNode*>* nodes, + SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) OVERRIDE; protected: SchedulingNode* GetHigherPrioritySchedulingNode(SchedulingNode* candidate, SchedulingNode* check) const; - SchedulingNode* SelectMaterializedCondition(ArenaVector<SchedulingNode*>* nodes, - const SchedulingGraph& graph) const; + SchedulingNode* SelectMaterializedCondition(ScopedArenaVector<SchedulingNode*>* nodes, + const SchedulingGraph& graph) const; private: const SchedulingNode* prev_select_; @@ -424,16 +434,16 @@ class CriticalPathSchedulingNodeSelector : public SchedulingNodeSelector { class HScheduler { public: - HScheduler(ArenaAllocator* arena, + HScheduler(ScopedArenaAllocator* allocator, SchedulingLatencyVisitor* latency_visitor, SchedulingNodeSelector* selector) - : arena_(arena), + : allocator_(allocator), latency_visitor_(latency_visitor), selector_(selector), only_optimize_loop_blocks_(true), - scheduling_graph_(this, arena), + scheduling_graph_(this, allocator), cursor_(nullptr), - candidates_(arena_->Adapter(kArenaAllocScheduler)) {} + candidates_(allocator_->Adapter(kArenaAllocScheduler)) {} virtual ~HScheduler() {} void Schedule(HGraph* graph); @@ -461,7 +471,7 @@ class HScheduler { node->SetInternalLatency(latency_visitor_->GetLastVisitedInternalLatency()); } - ArenaAllocator* const arena_; + ScopedArenaAllocator* const allocator_; SchedulingLatencyVisitor* const latency_visitor_; SchedulingNodeSelector* const selector_; bool only_optimize_loop_blocks_; @@ -473,7 +483,7 @@ class HScheduler { HInstruction* cursor_; // The list of candidates for scheduling. A node becomes a candidate when all // its predecessors have been scheduled. 
- ArenaVector<SchedulingNode*> candidates_; + ScopedArenaVector<SchedulingNode*> candidates_; private: DISALLOW_COPY_AND_ASSIGN(HScheduler); diff --git a/compiler/optimizing/scheduler_arm.h b/compiler/optimizing/scheduler_arm.h index fe274d29f9..0cb8684376 100644 --- a/compiler/optimizing/scheduler_arm.h +++ b/compiler/optimizing/scheduler_arm.h @@ -137,10 +137,10 @@ class SchedulingLatencyVisitorARM : public SchedulingLatencyVisitor { class HSchedulerARM : public HScheduler { public: - HSchedulerARM(ArenaAllocator* arena, + HSchedulerARM(ScopedArenaAllocator* allocator, SchedulingNodeSelector* selector, SchedulingLatencyVisitorARM* arm_latency_visitor) - : HScheduler(arena, arm_latency_visitor, selector) {} + : HScheduler(allocator, arm_latency_visitor, selector) {} ~HSchedulerARM() OVERRIDE {} bool IsSchedulable(const HInstruction* instruction) const OVERRIDE { diff --git a/compiler/optimizing/scheduler_arm64.h b/compiler/optimizing/scheduler_arm64.h index e1a80ec6fb..32f161f26a 100644 --- a/compiler/optimizing/scheduler_arm64.h +++ b/compiler/optimizing/scheduler_arm64.h @@ -131,8 +131,8 @@ class SchedulingLatencyVisitorARM64 : public SchedulingLatencyVisitor { class HSchedulerARM64 : public HScheduler { public: - HSchedulerARM64(ArenaAllocator* arena, SchedulingNodeSelector* selector) - : HScheduler(arena, &arm64_latency_visitor_, selector) {} + HSchedulerARM64(ScopedArenaAllocator* allocator, SchedulingNodeSelector* selector) + : HScheduler(allocator, &arm64_latency_visitor_, selector) {} ~HSchedulerARM64() OVERRIDE {} bool IsSchedulable(const HInstruction* instruction) const OVERRIDE { diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc index 0e6e0c5a3d..dfc1633fe6 100644 --- a/compiler/optimizing/scheduler_test.cc +++ b/compiler/optimizing/scheduler_test.cc @@ -71,16 +71,14 @@ static ::std::vector<CodegenTargetConfig> GetTargetConfigs() { return v; } -class SchedulerTest : public CommonCompilerTest { +class 
SchedulerTest : public OptimizingUnitTest { public: - SchedulerTest() : pool_(), allocator_(&pool_) { - graph_ = CreateGraph(&allocator_); - } + SchedulerTest() : graph_(CreateGraph()) { } // Build scheduling graph, and run target specific scheduling on it. void TestBuildDependencyGraphAndSchedule(HScheduler* scheduler) { - HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_); - HBasicBlock* block1 = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_); + HBasicBlock* block1 = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(entry); graph_->AddBlock(block1); graph_->SetEntryBlock(entry); @@ -100,23 +98,25 @@ class SchedulerTest : public CommonCompilerTest { // array_get2 ArrayGet [array, add1] // array_set2 ArraySet [array, add1, add2] - HInstruction* array = new (&allocator_) HParameterValue(graph_->GetDexFile(), - dex::TypeIndex(0), - 0, - DataType::Type::kReference); + HInstruction* array = new (GetAllocator()) HParameterValue(graph_->GetDexFile(), + dex::TypeIndex(0), + 0, + DataType::Type::kReference); HInstruction* c1 = graph_->GetIntConstant(1); HInstruction* c2 = graph_->GetIntConstant(10); - HInstruction* add1 = new (&allocator_) HAdd(DataType::Type::kInt32, c1, c2); - HInstruction* add2 = new (&allocator_) HAdd(DataType::Type::kInt32, add1, c2); - HInstruction* mul = new (&allocator_) HMul(DataType::Type::kInt32, add1, add2); - HInstruction* div_check = new (&allocator_) HDivZeroCheck(add2, 0); - HInstruction* div = new (&allocator_) HDiv(DataType::Type::kInt32, add1, div_check, 0); - HInstruction* array_get1 = new (&allocator_) HArrayGet(array, add1, DataType::Type::kInt32, 0); + HInstruction* add1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, c1, c2); + HInstruction* add2 = new (GetAllocator()) HAdd(DataType::Type::kInt32, add1, c2); + HInstruction* mul = new (GetAllocator()) HMul(DataType::Type::kInt32, add1, add2); + HInstruction* div_check = new (GetAllocator()) 
HDivZeroCheck(add2, 0); + HInstruction* div = new (GetAllocator()) HDiv(DataType::Type::kInt32, add1, div_check, 0); + HInstruction* array_get1 = + new (GetAllocator()) HArrayGet(array, add1, DataType::Type::kInt32, 0); HInstruction* array_set1 = - new (&allocator_) HArraySet(array, add1, add2, DataType::Type::kInt32, 0); - HInstruction* array_get2 = new (&allocator_) HArrayGet(array, add1, DataType::Type::kInt32, 0); + new (GetAllocator()) HArraySet(array, add1, add2, DataType::Type::kInt32, 0); + HInstruction* array_get2 = + new (GetAllocator()) HArrayGet(array, add1, DataType::Type::kInt32, 0); HInstruction* array_set2 = - new (&allocator_) HArraySet(array, add1, add2, DataType::Type::kInt32, 0); + new (GetAllocator()) HArraySet(array, add1, add2, DataType::Type::kInt32, 0); DCHECK(div_check->CanThrow()); @@ -135,18 +135,18 @@ class SchedulerTest : public CommonCompilerTest { block1->AddInstruction(instr); } - HEnvironment* environment = new (&allocator_) HEnvironment(&allocator_, - 2, - graph_->GetArtMethod(), - 0, - div_check); + HEnvironment* environment = new (GetAllocator()) HEnvironment(GetAllocator(), + 2, + graph_->GetArtMethod(), + 0, + div_check); div_check->SetRawEnvironment(environment); environment->SetRawEnvAt(0, add2); add2->AddEnvUseAt(div_check->GetEnvironment(), 0); environment->SetRawEnvAt(1, mul); mul->AddEnvUseAt(div_check->GetEnvironment(), 1); - SchedulingGraph scheduling_graph(scheduler, graph_->GetArena()); + SchedulingGraph scheduling_graph(scheduler, GetScopedAllocator()); // Instructions must be inserted in reverse order into the scheduling graph. 
for (HInstruction* instr : ReverseRange(block_instructions)) { scheduling_graph.AddNode(instr); @@ -184,7 +184,7 @@ class SchedulerTest : public CommonCompilerTest { void CompileWithRandomSchedulerAndRun(const uint16_t* data, bool has_result, int expected) { for (CodegenTargetConfig target_config : GetTargetConfigs()) { - HGraph* graph = CreateCFG(&allocator_, data); + HGraph* graph = CreateCFG(data); // Schedule the graph randomly. HInstructionScheduling scheduling(graph, target_config.GetInstructionSet()); @@ -198,55 +198,57 @@ class SchedulerTest : public CommonCompilerTest { } void TestDependencyGraphOnAliasingArrayAccesses(HScheduler* scheduler) { - HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_); + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(entry); graph_->SetEntryBlock(entry); graph_->BuildDominatorTree(); - HInstruction* arr = new (&allocator_) HParameterValue(graph_->GetDexFile(), - dex::TypeIndex(0), - 0, - DataType::Type::kReference); - HInstruction* i = new (&allocator_) HParameterValue(graph_->GetDexFile(), - dex::TypeIndex(1), - 1, - DataType::Type::kInt32); - HInstruction* j = new (&allocator_) HParameterValue(graph_->GetDexFile(), - dex::TypeIndex(1), - 1, - DataType::Type::kInt32); - HInstruction* object = new (&allocator_) HParameterValue(graph_->GetDexFile(), + HInstruction* arr = new (GetAllocator()) HParameterValue(graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); + HInstruction* i = new (GetAllocator()) HParameterValue(graph_->GetDexFile(), + dex::TypeIndex(1), + 1, + DataType::Type::kInt32); + HInstruction* j = new (GetAllocator()) HParameterValue(graph_->GetDexFile(), + dex::TypeIndex(1), + 1, + DataType::Type::kInt32); + HInstruction* object = new (GetAllocator()) HParameterValue(graph_->GetDexFile(), + dex::TypeIndex(0), + 0, + DataType::Type::kReference); HInstruction* c0 = graph_->GetIntConstant(0); HInstruction* c1 = graph_->GetIntConstant(1); - HInstruction* add0 = 
new (&allocator_) HAdd(DataType::Type::kInt32, i, c0); - HInstruction* add1 = new (&allocator_) HAdd(DataType::Type::kInt32, i, c1); - HInstruction* sub0 = new (&allocator_) HSub(DataType::Type::kInt32, i, c0); - HInstruction* sub1 = new (&allocator_) HSub(DataType::Type::kInt32, i, c1); - HInstruction* arr_set_0 = new (&allocator_) HArraySet(arr, c0, c0, DataType::Type::kInt32, 0); - HInstruction* arr_set_1 = new (&allocator_) HArraySet(arr, c1, c0, DataType::Type::kInt32, 0); - HInstruction* arr_set_i = new (&allocator_) HArraySet(arr, i, c0, DataType::Type::kInt32, 0); + HInstruction* add0 = new (GetAllocator()) HAdd(DataType::Type::kInt32, i, c0); + HInstruction* add1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, i, c1); + HInstruction* sub0 = new (GetAllocator()) HSub(DataType::Type::kInt32, i, c0); + HInstruction* sub1 = new (GetAllocator()) HSub(DataType::Type::kInt32, i, c1); + HInstruction* arr_set_0 = + new (GetAllocator()) HArraySet(arr, c0, c0, DataType::Type::kInt32, 0); + HInstruction* arr_set_1 = + new (GetAllocator()) HArraySet(arr, c1, c0, DataType::Type::kInt32, 0); + HInstruction* arr_set_i = new (GetAllocator()) HArraySet(arr, i, c0, DataType::Type::kInt32, 0); HInstruction* arr_set_add0 = - new (&allocator_) HArraySet(arr, add0, c0, DataType::Type::kInt32, 0); + new (GetAllocator()) HArraySet(arr, add0, c0, DataType::Type::kInt32, 0); HInstruction* arr_set_add1 = - new (&allocator_) HArraySet(arr, add1, c0, DataType::Type::kInt32, 0); + new (GetAllocator()) HArraySet(arr, add1, c0, DataType::Type::kInt32, 0); HInstruction* arr_set_sub0 = - new (&allocator_) HArraySet(arr, sub0, c0, DataType::Type::kInt32, 0); + new (GetAllocator()) HArraySet(arr, sub0, c0, DataType::Type::kInt32, 0); HInstruction* arr_set_sub1 = - new (&allocator_) HArraySet(arr, sub1, c0, DataType::Type::kInt32, 0); - HInstruction* arr_set_j = new (&allocator_) HArraySet(arr, j, c0, DataType::Type::kInt32, 0); - HInstanceFieldSet* set_field10 = new (&allocator_) 
HInstanceFieldSet(object, - c1, - nullptr, - DataType::Type::kInt32, - MemberOffset(10), - false, - kUnknownFieldIndex, - kUnknownClassDefIndex, - graph_->GetDexFile(), - 0); + new (GetAllocator()) HArraySet(arr, sub1, c0, DataType::Type::kInt32, 0); + HInstruction* arr_set_j = new (GetAllocator()) HArraySet(arr, j, c0, DataType::Type::kInt32, 0); + HInstanceFieldSet* set_field10 = new (GetAllocator()) HInstanceFieldSet(object, + c1, + nullptr, + DataType::Type::kInt32, + MemberOffset(10), + false, + kUnknownFieldIndex, + kUnknownClassDefIndex, + graph_->GetDexFile(), + 0); HInstruction* block_instructions[] = {arr, i, @@ -270,7 +272,7 @@ class SchedulerTest : public CommonCompilerTest { entry->AddInstruction(instr); } - SchedulingGraph scheduling_graph(scheduler, graph_->GetArena()); + SchedulingGraph scheduling_graph(scheduler, GetScopedAllocator()); HeapLocationCollector heap_location_collector(graph_); heap_location_collector.VisitBasicBlock(entry); heap_location_collector.BuildAliasingMatrix(); @@ -342,21 +344,19 @@ class SchedulerTest : public CommonCompilerTest { scheduler->Schedule(graph_); } - ArenaPool pool_; - ArenaAllocator allocator_; HGraph* graph_; }; #if defined(ART_ENABLE_CODEGEN_arm64) TEST_F(SchedulerTest, DependencyGraphAndSchedulerARM64) { CriticalPathSchedulingNodeSelector critical_path_selector; - arm64::HSchedulerARM64 scheduler(&allocator_, &critical_path_selector); + arm64::HSchedulerARM64 scheduler(GetScopedAllocator(), &critical_path_selector); TestBuildDependencyGraphAndSchedule(&scheduler); } TEST_F(SchedulerTest, ArrayAccessAliasingARM64) { CriticalPathSchedulingNodeSelector critical_path_selector; - arm64::HSchedulerARM64 scheduler(&allocator_, &critical_path_selector); + arm64::HSchedulerARM64 scheduler(GetScopedAllocator(), &critical_path_selector); TestDependencyGraphOnAliasingArrayAccesses(&scheduler); } #endif @@ -365,14 +365,14 @@ TEST_F(SchedulerTest, ArrayAccessAliasingARM64) { TEST_F(SchedulerTest, 
DependencyGraphAndSchedulerARM) { CriticalPathSchedulingNodeSelector critical_path_selector; arm::SchedulingLatencyVisitorARM arm_latency_visitor(/*CodeGenerator*/ nullptr); - arm::HSchedulerARM scheduler(&allocator_, &critical_path_selector, &arm_latency_visitor); + arm::HSchedulerARM scheduler(GetScopedAllocator(), &critical_path_selector, &arm_latency_visitor); TestBuildDependencyGraphAndSchedule(&scheduler); } TEST_F(SchedulerTest, ArrayAccessAliasingARM) { CriticalPathSchedulingNodeSelector critical_path_selector; arm::SchedulingLatencyVisitorARM arm_latency_visitor(/*CodeGenerator*/ nullptr); - arm::HSchedulerARM scheduler(&allocator_, &critical_path_selector, &arm_latency_visitor); + arm::HSchedulerARM scheduler(GetScopedAllocator(), &critical_path_selector, &arm_latency_visitor); TestDependencyGraphOnAliasingArrayAccesses(&scheduler); } #endif diff --git a/compiler/optimizing/select_generator.cc b/compiler/optimizing/select_generator.cc index 827b5913af..0e46aeca37 100644 --- a/compiler/optimizing/select_generator.cc +++ b/compiler/optimizing/select_generator.cc @@ -135,10 +135,10 @@ void HSelectGenerator::Run() { DCHECK(both_successors_return || phi != nullptr); // Create the Select instruction and insert it in front of the If. 
- HSelect* select = new (graph_->GetArena()) HSelect(if_instruction->InputAt(0), - true_value, - false_value, - if_instruction->GetDexPc()); + HSelect* select = new (graph_->GetAllocator()) HSelect(if_instruction->InputAt(0), + true_value, + false_value, + if_instruction->GetDexPc()); if (both_successors_return) { if (true_value->GetType() == DataType::Type::kReference) { DCHECK(false_value->GetType() == DataType::Type::kReference); diff --git a/compiler/optimizing/side_effects_analysis.h b/compiler/optimizing/side_effects_analysis.h index fea47e66d9..cf00e48e24 100644 --- a/compiler/optimizing/side_effects_analysis.h +++ b/compiler/optimizing/side_effects_analysis.h @@ -29,9 +29,9 @@ class SideEffectsAnalysis : public HOptimization { : HOptimization(graph, pass_name), graph_(graph), block_effects_(graph->GetBlocks().size(), - graph->GetArena()->Adapter(kArenaAllocSideEffectsAnalysis)), + graph->GetAllocator()->Adapter(kArenaAllocSideEffectsAnalysis)), loop_effects_(graph->GetBlocks().size(), - graph->GetArena()->Adapter(kArenaAllocSideEffectsAnalysis)) {} + graph->GetAllocator()->Adapter(kArenaAllocSideEffectsAnalysis)) {} SideEffects GetLoopEffects(HBasicBlock* block) const; SideEffects GetBlockEffects(HBasicBlock* block) const; diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc index 23563168a0..f4a8a17131 100644 --- a/compiler/optimizing/ssa_builder.cc +++ b/compiler/optimizing/ssa_builder.cc @@ -233,7 +233,7 @@ bool SsaBuilder::UpdatePrimitiveType(HPhi* phi, ArenaVector<HPhi*>* worklist) { } void SsaBuilder::RunPrimitiveTypePropagation() { - ArenaVector<HPhi*> worklist(graph_->GetArena()->Adapter(kArenaAllocGraphBuilder)); + ArenaVector<HPhi*> worklist(graph_->GetAllocator()->Adapter(kArenaAllocGraphBuilder)); for (HBasicBlock* block : graph_->GetReversePostOrder()) { if (block->IsLoopHeader()) { @@ -293,7 +293,7 @@ static HArrayGet* CreateFloatOrDoubleEquivalentOfArrayGet(HArrayGet* aget) { 
DCHECK(DataType::IsIntOrLongType(type)); DCHECK(FindFloatOrDoubleEquivalentOfArrayGet(aget) == nullptr); - HArrayGet* equivalent = new (aget->GetBlock()->GetGraph()->GetArena()) HArrayGet( + HArrayGet* equivalent = new (aget->GetBlock()->GetGraph()->GetAllocator()) HArrayGet( aget->GetArray(), aget->GetIndex(), type == DataType::Type::kInt32 ? DataType::Type::kFloat32 : DataType::Type::kFloat64, @@ -319,7 +319,7 @@ bool SsaBuilder::FixAmbiguousArrayOps() { // uses (because they are untyped) and environment uses (if --debuggable). // After resolving all ambiguous ArrayGets, we will re-run primitive type // propagation on the Phis which need to be updated. - ArenaVector<HPhi*> worklist(graph_->GetArena()->Adapter(kArenaAllocGraphBuilder)); + ArenaVector<HPhi*> worklist(graph_->GetAllocator()->Adapter(kArenaAllocGraphBuilder)); { ScopedObjectAccess soa(Thread::Current()); @@ -566,7 +566,7 @@ HFloatConstant* SsaBuilder::GetFloatEquivalent(HIntConstant* constant) { HFloatConstant* result = constant->GetNext()->AsFloatConstant(); if (result == nullptr) { float value = bit_cast<float, int32_t>(constant->GetValue()); - result = new (graph_->GetArena()) HFloatConstant(value); + result = new (graph_->GetAllocator()) HFloatConstant(value); constant->GetBlock()->InsertInstructionBefore(result, constant->GetNext()); graph_->CacheFloatConstant(result); } else { @@ -588,7 +588,7 @@ HDoubleConstant* SsaBuilder::GetDoubleEquivalent(HLongConstant* constant) { HDoubleConstant* result = constant->GetNext()->AsDoubleConstant(); if (result == nullptr) { double value = bit_cast<double, int64_t>(constant->GetValue()); - result = new (graph_->GetArena()) HDoubleConstant(value); + result = new (graph_->GetAllocator()) HDoubleConstant(value); constant->GetBlock()->InsertInstructionBefore(result, constant->GetNext()); graph_->CacheDoubleConstant(result); } else { @@ -621,7 +621,7 @@ HPhi* SsaBuilder::GetFloatDoubleOrReferenceEquivalentOfPhi(HPhi* phi, DataType:: if (next == nullptr || 
(next->AsPhi()->GetRegNumber() != phi->GetRegNumber()) || (next->GetType() != type)) { - ArenaAllocator* allocator = graph_->GetArena(); + ArenaAllocator* allocator = graph_->GetAllocator(); HInputsRef inputs = phi->GetInputs(); HPhi* new_phi = new (allocator) HPhi(allocator, phi->GetRegNumber(), inputs.size(), type); diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h index 1819ee568e..509cdc1252 100644 --- a/compiler/optimizing/ssa_builder.h +++ b/compiler/optimizing/ssa_builder.h @@ -56,9 +56,9 @@ class SsaBuilder : public ValueObject { dex_cache_(dex_cache), handles_(handles), agets_fixed_(false), - ambiguous_agets_(graph->GetArena()->Adapter(kArenaAllocGraphBuilder)), - ambiguous_asets_(graph->GetArena()->Adapter(kArenaAllocGraphBuilder)), - uninitialized_strings_(graph->GetArena()->Adapter(kArenaAllocGraphBuilder)) { + ambiguous_agets_(graph->GetAllocator()->Adapter(kArenaAllocGraphBuilder)), + ambiguous_asets_(graph->GetAllocator()->Adapter(kArenaAllocGraphBuilder)), + uninitialized_strings_(graph->GetAllocator()->Adapter(kArenaAllocGraphBuilder)) { graph_->InitializeInexactObjectRTI(handles); } diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc index f1f1be25d7..9ab7a89b33 100644 --- a/compiler/optimizing/ssa_liveness_analysis.cc +++ b/compiler/optimizing/ssa_liveness_analysis.cc @@ -26,7 +26,7 @@ namespace art { void SsaLivenessAnalysis::Analyze() { // Compute the linear order directly in the graph's data structure // (there are no more following graph mutations). - LinearizeGraph(graph_, graph_->GetArena(), &graph_->linear_order_); + LinearizeGraph(graph_, &graph_->linear_order_); // Liveness analysis. 
NumberInstructions(); @@ -56,7 +56,7 @@ void SsaLivenessAnalysis::NumberInstructions() { instructions_from_ssa_index_.push_back(current); current->SetSsaIndex(ssa_index++); current->SetLiveInterval( - LiveInterval::MakeInterval(graph_->GetArena(), current->GetType(), current)); + LiveInterval::MakeInterval(allocator_, current->GetType(), current)); } current->SetLifetimePosition(lifetime_position); } @@ -74,7 +74,7 @@ void SsaLivenessAnalysis::NumberInstructions() { instructions_from_ssa_index_.push_back(current); current->SetSsaIndex(ssa_index++); current->SetLiveInterval( - LiveInterval::MakeInterval(graph_->GetArena(), current->GetType(), current)); + LiveInterval::MakeInterval(allocator_, current->GetType(), current)); } instructions_from_lifetime_position_.push_back(current); current->SetLifetimePosition(lifetime_position); @@ -89,7 +89,7 @@ void SsaLivenessAnalysis::NumberInstructions() { void SsaLivenessAnalysis::ComputeLiveness() { for (HBasicBlock* block : graph_->GetLinearOrder()) { block_infos_[block->GetBlockId()] = - new (graph_->GetArena()) BlockInfo(graph_->GetArena(), *block, number_of_ssa_values_); + new (allocator_) BlockInfo(allocator_, *block, number_of_ssa_values_); } // Compute the live ranges, as well as the initial live_in, live_out, and kill sets. 
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h index ec4ab31d61..9800af76f8 100644 --- a/compiler/optimizing/ssa_liveness_analysis.h +++ b/compiler/optimizing/ssa_liveness_analysis.h @@ -20,6 +20,8 @@ #include <iostream> #include "base/iteration_range.h" +#include "base/scoped_arena_allocator.h" +#include "base/scoped_arena_containers.h" #include "nodes.h" #include "utils/intrusive_forward_list.h" @@ -32,7 +34,7 @@ static constexpr int kNoRegister = -1; class BlockInfo : public ArenaObject<kArenaAllocSsaLiveness> { public: - BlockInfo(ArenaAllocator* allocator, const HBasicBlock& block, size_t number_of_ssa_values) + BlockInfo(ScopedArenaAllocator* allocator, const HBasicBlock& block, size_t number_of_ssa_values) : block_(block), live_in_(allocator, number_of_ssa_values, false, kArenaAllocSsaLiveness), live_out_(allocator, number_of_ssa_values, false, kArenaAllocSsaLiveness), @@ -82,7 +84,7 @@ class LiveRange FINAL : public ArenaObject<kArenaAllocSsaLiveness> { stream << "[" << start_ << "," << end_ << ")"; } - LiveRange* Dup(ArenaAllocator* allocator) const { + LiveRange* Dup(ScopedArenaAllocator* allocator) const { return new (allocator) LiveRange( start_, end_, next_ == nullptr ? 
nullptr : next_->Dup(allocator)); } @@ -135,7 +137,7 @@ class UsePosition : public ArenaObject<kArenaAllocSsaLiveness>, return user_->GetBlock()->GetLoopInformation(); } - UsePosition* Clone(ArenaAllocator* allocator) const { + UsePosition* Clone(ScopedArenaAllocator* allocator) const { return new (allocator) UsePosition(user_, input_index_, position_); } @@ -180,7 +182,7 @@ class EnvUsePosition : public ArenaObject<kArenaAllocSsaLiveness>, stream << position_; } - EnvUsePosition* Clone(ArenaAllocator* allocator) const { + EnvUsePosition* Clone(ScopedArenaAllocator* allocator) const { return new (allocator) EnvUsePosition(environment_, input_index_, position_); } @@ -261,17 +263,19 @@ class SafepointPosition : public ArenaObject<kArenaAllocSsaLiveness> { */ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> { public: - static LiveInterval* MakeInterval(ArenaAllocator* allocator, + static LiveInterval* MakeInterval(ScopedArenaAllocator* allocator, DataType::Type type, HInstruction* instruction = nullptr) { return new (allocator) LiveInterval(allocator, type, instruction); } - static LiveInterval* MakeFixedInterval(ArenaAllocator* allocator, int reg, DataType::Type type) { + static LiveInterval* MakeFixedInterval(ScopedArenaAllocator* allocator, + int reg, + DataType::Type type) { return new (allocator) LiveInterval(allocator, type, nullptr, true, reg, false); } - static LiveInterval* MakeTempInterval(ArenaAllocator* allocator, DataType::Type type) { + static LiveInterval* MakeTempInterval(ScopedArenaAllocator* allocator, DataType::Type type) { return new (allocator) LiveInterval(allocator, type, nullptr, false, kNoRegister, true); } @@ -969,7 +973,7 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> { } private: - LiveInterval(ArenaAllocator* allocator, + LiveInterval(ScopedArenaAllocator* allocator, DataType::Type type, HInstruction* defined_by = nullptr, bool is_fixed = false, @@ -1082,7 +1086,7 @@ class LiveInterval : public 
ArenaObject<kArenaAllocSsaLiveness> { } } - ArenaAllocator* const allocator_; + ScopedArenaAllocator* const allocator_; // Ranges of this interval. We need a quick access to the last range to test // for liveness (see `IsDeadAt`). @@ -1158,14 +1162,15 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> { */ class SsaLivenessAnalysis : public ValueObject { public: - SsaLivenessAnalysis(HGraph* graph, CodeGenerator* codegen) + SsaLivenessAnalysis(HGraph* graph, CodeGenerator* codegen, ScopedArenaAllocator* allocator) : graph_(graph), codegen_(codegen), + allocator_(allocator), block_infos_(graph->GetBlocks().size(), nullptr, - graph->GetArena()->Adapter(kArenaAllocSsaLiveness)), - instructions_from_ssa_index_(graph->GetArena()->Adapter(kArenaAllocSsaLiveness)), - instructions_from_lifetime_position_(graph->GetArena()->Adapter(kArenaAllocSsaLiveness)), + allocator_->Adapter(kArenaAllocSsaLiveness)), + instructions_from_ssa_index_(allocator_->Adapter(kArenaAllocSsaLiveness)), + instructions_from_lifetime_position_(allocator_->Adapter(kArenaAllocSsaLiveness)), number_of_ssa_values_(0) { } @@ -1284,13 +1289,18 @@ class SsaLivenessAnalysis : public ValueObject { HGraph* const graph_; CodeGenerator* const codegen_; - ArenaVector<BlockInfo*> block_infos_; + + // Use a local ScopedArenaAllocator for allocating memory. + // This allocator must remain alive while doing register allocation. + ScopedArenaAllocator* allocator_; + + ScopedArenaVector<BlockInfo*> block_infos_; // Temporary array used when computing live_in, live_out, and kill sets. - ArenaVector<HInstruction*> instructions_from_ssa_index_; + ScopedArenaVector<HInstruction*> instructions_from_ssa_index_; // Temporary array used when inserting moves in the graph. 
- ArenaVector<HInstruction*> instructions_from_lifetime_position_; + ScopedArenaVector<HInstruction*> instructions_from_lifetime_position_; size_t number_of_ssa_values_; ART_FRIEND_TEST(RegisterAllocatorTest, SpillInactive); diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc index e89bf6d801..9b78e0ee6c 100644 --- a/compiler/optimizing/ssa_liveness_analysis_test.cc +++ b/compiler/optimizing/ssa_liveness_analysis_test.cc @@ -27,12 +27,10 @@ namespace art { -class SsaLivenessAnalysisTest : public testing::Test { +class SsaLivenessAnalysisTest : public OptimizingUnitTest { public: SsaLivenessAnalysisTest() - : pool_(), - allocator_(&pool_), - graph_(CreateGraph(&allocator_)), + : graph_(CreateGraph()), compiler_options_(), instruction_set_(kRuntimeISA) { std::string error_msg; @@ -44,7 +42,7 @@ class SsaLivenessAnalysisTest : public testing::Test { compiler_options_); CHECK(codegen_ != nullptr) << instruction_set_ << " is not a supported target architecture."; // Create entry block. 
- entry_ = new (&allocator_) HBasicBlock(graph_); + entry_ = new (GetAllocator()) HBasicBlock(graph_); graph_->AddBlock(entry_); graph_->SetEntryBlock(entry_); } @@ -52,14 +50,12 @@ class SsaLivenessAnalysisTest : public testing::Test { protected: HBasicBlock* CreateSuccessor(HBasicBlock* block) { HGraph* graph = block->GetGraph(); - HBasicBlock* successor = new (&allocator_) HBasicBlock(graph); + HBasicBlock* successor = new (GetAllocator()) HBasicBlock(graph); graph->AddBlock(successor); block->AddSuccessor(successor); return successor; } - ArenaPool pool_; - ArenaAllocator allocator_; HGraph* graph_; CompilerOptions compiler_options_; InstructionSet instruction_set_; @@ -69,17 +65,17 @@ class SsaLivenessAnalysisTest : public testing::Test { }; TEST_F(SsaLivenessAnalysisTest, TestReturnArg) { - HInstruction* arg = new (&allocator_) HParameterValue( + HInstruction* arg = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); entry_->AddInstruction(arg); HBasicBlock* block = CreateSuccessor(entry_); - HInstruction* ret = new (&allocator_) HReturn(arg); + HInstruction* ret = new (GetAllocator()) HReturn(arg); block->AddInstruction(ret); - block->AddInstruction(new (&allocator_) HExit()); + block->AddInstruction(new (GetAllocator()) HExit()); graph_->BuildDominatorTree(); - SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get()); + SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get(), GetScopedAllocator()); ssa_analysis.Analyze(); std::ostringstream arg_dump; @@ -89,49 +85,49 @@ TEST_F(SsaLivenessAnalysisTest, TestReturnArg) { } TEST_F(SsaLivenessAnalysisTest, TestAput) { - HInstruction* array = new (&allocator_) HParameterValue( + HInstruction* array = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); - HInstruction* index = new (&allocator_) HParameterValue( + HInstruction* index = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), 
dex::TypeIndex(1), 1, DataType::Type::kInt32); - HInstruction* value = new (&allocator_) HParameterValue( + HInstruction* value = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(2), 2, DataType::Type::kInt32); - HInstruction* extra_arg1 = new (&allocator_) HParameterValue( + HInstruction* extra_arg1 = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(3), 3, DataType::Type::kInt32); - HInstruction* extra_arg2 = new (&allocator_) HParameterValue( + HInstruction* extra_arg2 = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(4), 4, DataType::Type::kReference); ArenaVector<HInstruction*> args({ array, index, value, extra_arg1, extra_arg2 }, - allocator_.Adapter()); + GetAllocator()->Adapter()); for (HInstruction* insn : args) { entry_->AddInstruction(insn); } HBasicBlock* block = CreateSuccessor(entry_); - HInstruction* null_check = new (&allocator_) HNullCheck(array, 0); + HInstruction* null_check = new (GetAllocator()) HNullCheck(array, 0); block->AddInstruction(null_check); - HEnvironment* null_check_env = new (&allocator_) HEnvironment(&allocator_, - /* number_of_vregs */ 5, - /* method */ nullptr, - /* dex_pc */ 0u, - null_check); + HEnvironment* null_check_env = new (GetAllocator()) HEnvironment(GetAllocator(), + /* number_of_vregs */ 5, + /* method */ nullptr, + /* dex_pc */ 0u, + null_check); null_check_env->CopyFrom(args); null_check->SetRawEnvironment(null_check_env); - HInstruction* length = new (&allocator_) HArrayLength(array, 0); + HInstruction* length = new (GetAllocator()) HArrayLength(array, 0); block->AddInstruction(length); - HInstruction* bounds_check = new (&allocator_) HBoundsCheck(index, length, /* dex_pc */ 0u); + HInstruction* bounds_check = new (GetAllocator()) HBoundsCheck(index, length, /* dex_pc */ 0u); block->AddInstruction(bounds_check); - HEnvironment* bounds_check_env = new (&allocator_) HEnvironment(&allocator_, - /* number_of_vregs */ 5, - /* method */ 
nullptr, - /* dex_pc */ 0u, - bounds_check); + HEnvironment* bounds_check_env = new (GetAllocator()) HEnvironment(GetAllocator(), + /* number_of_vregs */ 5, + /* method */ nullptr, + /* dex_pc */ 0u, + bounds_check); bounds_check_env->CopyFrom(args); bounds_check->SetRawEnvironment(bounds_check_env); HInstruction* array_set = - new (&allocator_) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0); + new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0); block->AddInstruction(array_set); graph_->BuildDominatorTree(); - SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get()); + SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get(), GetScopedAllocator()); ssa_analysis.Analyze(); EXPECT_FALSE(graph_->IsDebuggable()); @@ -159,53 +155,53 @@ TEST_F(SsaLivenessAnalysisTest, TestAput) { } TEST_F(SsaLivenessAnalysisTest, TestDeoptimize) { - HInstruction* array = new (&allocator_) HParameterValue( + HInstruction* array = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); - HInstruction* index = new (&allocator_) HParameterValue( + HInstruction* index = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32); - HInstruction* value = new (&allocator_) HParameterValue( + HInstruction* value = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(2), 2, DataType::Type::kInt32); - HInstruction* extra_arg1 = new (&allocator_) HParameterValue( + HInstruction* extra_arg1 = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(3), 3, DataType::Type::kInt32); - HInstruction* extra_arg2 = new (&allocator_) HParameterValue( + HInstruction* extra_arg2 = new (GetAllocator()) HParameterValue( graph_->GetDexFile(), dex::TypeIndex(4), 4, DataType::Type::kReference); ArenaVector<HInstruction*> args({ array, index, value, extra_arg1, extra_arg2 }, - allocator_.Adapter()); + 
GetAllocator()->Adapter()); for (HInstruction* insn : args) { entry_->AddInstruction(insn); } HBasicBlock* block = CreateSuccessor(entry_); - HInstruction* null_check = new (&allocator_) HNullCheck(array, 0); + HInstruction* null_check = new (GetAllocator()) HNullCheck(array, 0); block->AddInstruction(null_check); - HEnvironment* null_check_env = new (&allocator_) HEnvironment(&allocator_, - /* number_of_vregs */ 5, - /* method */ nullptr, - /* dex_pc */ 0u, - null_check); + HEnvironment* null_check_env = new (GetAllocator()) HEnvironment(GetAllocator(), + /* number_of_vregs */ 5, + /* method */ nullptr, + /* dex_pc */ 0u, + null_check); null_check_env->CopyFrom(args); null_check->SetRawEnvironment(null_check_env); - HInstruction* length = new (&allocator_) HArrayLength(array, 0); + HInstruction* length = new (GetAllocator()) HArrayLength(array, 0); block->AddInstruction(length); // Use HAboveOrEqual+HDeoptimize as the bounds check. - HInstruction* ae = new (&allocator_) HAboveOrEqual(index, length); + HInstruction* ae = new (GetAllocator()) HAboveOrEqual(index, length); block->AddInstruction(ae); - HInstruction* deoptimize = - new(&allocator_) HDeoptimize(&allocator_, ae, DeoptimizationKind::kBlockBCE, /* dex_pc */ 0u); + HInstruction* deoptimize = new(GetAllocator()) HDeoptimize( + GetAllocator(), ae, DeoptimizationKind::kBlockBCE, /* dex_pc */ 0u); block->AddInstruction(deoptimize); - HEnvironment* deoptimize_env = new (&allocator_) HEnvironment(&allocator_, - /* number_of_vregs */ 5, - /* method */ nullptr, - /* dex_pc */ 0u, - deoptimize); + HEnvironment* deoptimize_env = new (GetAllocator()) HEnvironment(GetAllocator(), + /* number_of_vregs */ 5, + /* method */ nullptr, + /* dex_pc */ 0u, + deoptimize); deoptimize_env->CopyFrom(args); deoptimize->SetRawEnvironment(deoptimize_env); HInstruction* array_set = - new (&allocator_) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0); + new (GetAllocator()) HArraySet(array, index, value, 
DataType::Type::kInt32, /* dex_pc */ 0); block->AddInstruction(array_set); graph_->BuildDominatorTree(); - SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get()); + SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get(), GetScopedAllocator()); ssa_analysis.Analyze(); EXPECT_FALSE(graph_->IsDebuggable()); diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc index b4f8408a76..3b95b86268 100644 --- a/compiler/optimizing/ssa_phi_elimination.cc +++ b/compiler/optimizing/ssa_phi_elimination.cc @@ -31,7 +31,7 @@ void SsaDeadPhiElimination::MarkDeadPhis() { // Phis are constructed live and should not be revived if previously marked // dead. This algorithm temporarily breaks that invariant but we DCHECK that // only phis which were initially live are revived. - ArenaSet<HPhi*> initially_live(graph_->GetArena()->Adapter(kArenaAllocSsaPhiElimination)); + ArenaSet<HPhi*> initially_live(graph_->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination)); // Add to the worklist phis referenced by non-phi instructions. 
for (HBasicBlock* block : graph_->GetReversePostOrder()) { @@ -123,11 +123,11 @@ void SsaRedundantPhiElimination::Run() { } } - ArenaBitVector visited_phis_in_cycle(graph_->GetArena(), + ArenaBitVector visited_phis_in_cycle(graph_->GetAllocator(), graph_->GetCurrentInstructionId(), /* expandable */ false, kArenaAllocSsaPhiElimination); - ArenaVector<HPhi*> cycle_worklist(graph_->GetArena()->Adapter(kArenaAllocSsaPhiElimination)); + ArenaVector<HPhi*> cycle_worklist(graph_->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination)); while (!worklist_.empty()) { HPhi* phi = worklist_.back(); diff --git a/compiler/optimizing/ssa_phi_elimination.h b/compiler/optimizing/ssa_phi_elimination.h index b48e8200d5..e0cde074d6 100644 --- a/compiler/optimizing/ssa_phi_elimination.h +++ b/compiler/optimizing/ssa_phi_elimination.h @@ -31,7 +31,7 @@ class SsaDeadPhiElimination : public HOptimization { public: explicit SsaDeadPhiElimination(HGraph* graph) : HOptimization(graph, kSsaDeadPhiEliminationPassName), - worklist_(graph->GetArena()->Adapter(kArenaAllocSsaPhiElimination)) { + worklist_(graph->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination)) { worklist_.reserve(kDefaultWorklistSize); } @@ -60,7 +60,7 @@ class SsaRedundantPhiElimination : public HOptimization { public: explicit SsaRedundantPhiElimination(HGraph* graph) : HOptimization(graph, kSsaRedundantPhiEliminationPassName), - worklist_(graph->GetArena()->Adapter(kArenaAllocSsaPhiElimination)) { + worklist_(graph->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination)) { worklist_.reserve(kDefaultWorklistSize); } diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc index ac998dbcab..e08904e84b 100644 --- a/compiler/optimizing/ssa_test.cc +++ b/compiler/optimizing/ssa_test.cc @@ -29,7 +29,10 @@ namespace art { -class SsaTest : public CommonCompilerTest {}; +class SsaTest : public OptimizingUnitTest { + protected: + void TestCode(const uint16_t* data, const char* expected); +}; class 
SsaPrettyPrinter : public HPrettyPrinter { public: @@ -77,10 +80,8 @@ static void ReNumberInstructions(HGraph* graph) { } } -static void TestCode(const uint16_t* data, const char* expected) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = CreateCFG(&allocator, data); +void SsaTest::TestCode(const uint16_t* data, const char* expected) { + HGraph* graph = CreateCFG(data); // Suspend checks implementation may change in the future, and this test relies // on how instructions are ordered. RemoveSuspendChecks(graph); diff --git a/compiler/optimizing/suspend_check_test.cc b/compiler/optimizing/suspend_check_test.cc index 15cd4e8a08..88336b0009 100644 --- a/compiler/optimizing/suspend_check_test.cc +++ b/compiler/optimizing/suspend_check_test.cc @@ -28,10 +28,13 @@ namespace art { * Check that the HGraphBuilder adds suspend checks to backward branches. */ -static void TestCode(const uint16_t* data) { - ArenaPool pool; - ArenaAllocator allocator(&pool); - HGraph* graph = CreateCFG(&allocator, data); +class SuspendCheckTest : public OptimizingUnitTest { + protected: + void TestCode(const uint16_t* data); +}; + +void SuspendCheckTest::TestCode(const uint16_t* data) { + HGraph* graph = CreateCFG(data); HBasicBlock* first_block = graph->GetEntryBlock()->GetSingleSuccessor(); HBasicBlock* loop_header = first_block->GetSingleSuccessor(); ASSERT_TRUE(loop_header->IsLoopHeader()); @@ -39,8 +42,6 @@ static void TestCode(const uint16_t* data) { ASSERT_TRUE(loop_header->GetFirstInstruction()->IsSuspendCheck()); } -class SuspendCheckTest : public CommonCompilerTest {}; - TEST_F(SuspendCheckTest, CFG1) { const uint16_t data[] = ZERO_REGISTER_CODE_ITEM( Instruction::NOP, diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc index 70f290d60e..1e9a521aa8 100644 --- a/compiler/trampolines/trampoline_compiler.cc +++ b/compiler/trampolines/trampoline_compiler.cc @@ -57,11 +57,11 @@ namespace arm { #endif static 
std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline( - ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset32 offset) { + ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset32 offset) { using vixl::aarch32::MemOperand; using vixl::aarch32::pc; using vixl::aarch32::r0; - ArmVIXLAssembler assembler(arena); + ArmVIXLAssembler assembler(allocator); switch (abi) { case kInterpreterAbi: // Thread* is first argument (R0) in interpreter ABI. @@ -98,8 +98,8 @@ static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline( #ifdef ART_ENABLE_CODEGEN_arm64 namespace arm64 { static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline( - ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset64 offset) { - Arm64Assembler assembler(arena); + ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset64 offset) { + Arm64Assembler assembler(allocator); switch (abi) { case kInterpreterAbi: // Thread* is first argument (X0) in interpreter ABI. @@ -137,8 +137,8 @@ static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline( #ifdef ART_ENABLE_CODEGEN_mips namespace mips { static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline( - ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset32 offset) { - MipsAssembler assembler(arena); + ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset32 offset) { + MipsAssembler assembler(allocator); switch (abi) { case kInterpreterAbi: // Thread* is first argument (A0) in interpreter ABI. 
@@ -169,8 +169,8 @@ static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline( #ifdef ART_ENABLE_CODEGEN_mips64 namespace mips64 { static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline( - ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset64 offset) { - Mips64Assembler assembler(arena); + ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset64 offset) { + Mips64Assembler assembler(allocator); switch (abi) { case kInterpreterAbi: // Thread* is first argument (A0) in interpreter ABI. @@ -200,9 +200,9 @@ static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline( #ifdef ART_ENABLE_CODEGEN_x86 namespace x86 { -static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* arena, +static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* allocator, ThreadOffset32 offset) { - X86Assembler assembler(arena); + X86Assembler assembler(allocator); // All x86 trampolines call via the Thread* held in fs. __ fs()->jmp(Address::Absolute(offset)); @@ -221,9 +221,9 @@ static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocat #ifdef ART_ENABLE_CODEGEN_x86_64 namespace x86_64 { -static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* arena, +static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* allocator, ThreadOffset64 offset) { - x86_64::X86_64Assembler assembler(arena); + x86_64::X86_64Assembler assembler(allocator); // All x86 trampolines call via the Thread* held in gs. 
__ gs()->jmp(x86_64::Address::Absolute(offset, true)); diff --git a/compiler/utils/arm/assembler_arm_vixl.h b/compiler/utils/arm/assembler_arm_vixl.h index 9c11fd3222..0e73e6bf9e 100644 --- a/compiler/utils/arm/assembler_arm_vixl.h +++ b/compiler/utils/arm/assembler_arm_vixl.h @@ -151,8 +151,8 @@ class ArmVIXLAssembler FINAL : public Assembler { private: class ArmException; public: - explicit ArmVIXLAssembler(ArenaAllocator* arena) - : Assembler(arena) { + explicit ArmVIXLAssembler(ArenaAllocator* allocator) + : Assembler(allocator) { // Use Thumb2 instruction set. vixl_masm_.UseT32(); } diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc index ed57ca68e2..0bae4d4b69 100644 --- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc +++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc @@ -117,7 +117,8 @@ void ArmVIXLJNIMacroAssembler::BuildFrame(size_t frame_size, } void ArmVIXLJNIMacroAssembler::RemoveFrame(size_t frame_size, - ArrayRef<const ManagedRegister> callee_save_regs) { + ArrayRef<const ManagedRegister> callee_save_regs, + bool may_suspend) { CHECK_ALIGNED(frame_size, kStackAlignment); cfi().RememberState(); @@ -152,9 +153,33 @@ void ArmVIXLJNIMacroAssembler::RemoveFrame(size_t frame_size, ___ Pop(RegisterList(core_spill_mask)); if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { - // Refresh Mark Register. - // TODO: Refresh MR only if suspend is taken. - ___ Ldr(mr, MemOperand(tr, Thread::IsGcMarkingOffset<kArmPointerSize>().Int32Value())); + if (may_suspend) { + // The method may be suspended; refresh the Marking Register. + ___ Ldr(mr, MemOperand(tr, Thread::IsGcMarkingOffset<kArmPointerSize>().Int32Value())); + } else { + // The method shall not be suspended; no need to refresh the Marking Register. + + // Check that the Marking Register is a callee-save register, + // and thus has been preserved by native code following the + // AAPCS calling convention. 
+ DCHECK_NE(core_spill_mask & (1 << MR), 0) + << "core_spill_mask should contain Marking Register R" << MR; + + // The following condition is a compile-time one, so it does not have a run-time cost. + if (kIsDebugBuild) { + // The following condition is a run-time one; it is executed after the + // previous compile-time test, to avoid penalizing non-debug builds. + if (emit_run_time_checks_in_debug_mode_) { + // Emit a run-time check verifying that the Marking Register is up-to-date. + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + vixl32::Register temp = temps.Acquire(); + // Ensure we are not clobbering a callee-save register that was restored before. + DCHECK_EQ(core_spill_mask & (1 << temp.GetCode()), 0) + << "core_spill_mask should not contain scratch register R" << temp.GetCode(); + asm_.GenerateMarkingRegisterCheck(temp); + } + } + } + } // Return to LR. diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h index f3baf1f062..e239004506 100644 --- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h +++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h @@ -35,9 +35,9 @@ class ArmVIXLJNIMacroAssembler FINAL private: class ArmException; public: - explicit ArmVIXLJNIMacroAssembler(ArenaAllocator* arena) - : JNIMacroAssemblerFwd(arena), - exception_blocks_(arena->Adapter(kArenaAllocAssembler)) {} + explicit ArmVIXLJNIMacroAssembler(ArenaAllocator* allocator) + : JNIMacroAssemblerFwd(allocator), + exception_blocks_(allocator->Adapter(kArenaAllocAssembler)) {} virtual ~ArmVIXLJNIMacroAssembler() {} void FinalizeCode() OVERRIDE; @@ -54,7 +54,8 @@ class ArmVIXLJNIMacroAssembler FINAL // Emit code that will remove an activation from the stack.
void RemoveFrame(size_t frame_size, - ArrayRef<const ManagedRegister> callee_save_regs) OVERRIDE; + ArrayRef<const ManagedRegister> callee_save_regs, + bool may_suspend) OVERRIDE; void IncreaseFrameSize(size_t adjust) OVERRIDE; void DecreaseFrameSize(size_t adjust) OVERRIDE; diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h index 6b28363a8f..e5ec24add0 100644 --- a/compiler/utils/arm64/assembler_arm64.h +++ b/compiler/utils/arm64/assembler_arm64.h @@ -61,7 +61,7 @@ enum StoreOperandType { class Arm64Assembler FINAL : public Assembler { public: - explicit Arm64Assembler(ArenaAllocator* arena) : Assembler(arena) {} + explicit Arm64Assembler(ArenaAllocator* allocator) : Assembler(allocator) {} virtual ~Arm64Assembler() {} diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.cc b/compiler/utils/arm64/jni_macro_assembler_arm64.cc index 9732b765a1..573bb6d4be 100644 --- a/compiler/utils/arm64/jni_macro_assembler_arm64.cc +++ b/compiler/utils/arm64/jni_macro_assembler_arm64.cc @@ -743,7 +743,8 @@ void Arm64JNIMacroAssembler::BuildFrame(size_t frame_size, } void Arm64JNIMacroAssembler::RemoveFrame(size_t frame_size, - ArrayRef<const ManagedRegister> callee_save_regs) { + ArrayRef<const ManagedRegister> callee_save_regs, + bool may_suspend) { // Setup VIXL CPURegList for callee-saves. CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0); CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0); @@ -773,10 +774,36 @@ void Arm64JNIMacroAssembler::RemoveFrame(size_t frame_size, asm_.UnspillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size); if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { - // Refresh Mark Register. - // TODO: Refresh MR only if suspend is taken. - ___ Ldr(reg_w(MR), - MemOperand(reg_x(TR), Thread::IsGcMarkingOffset<kArm64PointerSize>().Int32Value())); + vixl::aarch64::Register mr = reg_x(MR); // Marking Register. + vixl::aarch64::Register tr = reg_x(TR); // Thread Register. 
+ + if (may_suspend) { + // The method may be suspended; refresh the Marking Register. + ___ Ldr(mr.W(), MemOperand(tr, Thread::IsGcMarkingOffset<kArm64PointerSize>().Int32Value())); + } else { + // The method shall not be suspended; no need to refresh the Marking Register. + + // Check that the Marking Register is a callee-save register, + // and thus has been preserved by native code following the + // AAPCS64 calling convention. + DCHECK(core_reg_list.IncludesAliasOf(mr)) + << "core_reg_list should contain Marking Register X" << mr.GetCode(); + + // The following condition is a compile-time one, so it does not have a run-time cost. + if (kIsDebugBuild) { + // The following condition is a run-time one; it is executed after the + // previous compile-time test, to avoid penalizing non-debug builds. + if (emit_run_time_checks_in_debug_mode_) { + // Emit a run-time check verifying that the Marking Register is up-to-date. + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + Register temp = temps.AcquireW(); + // Ensure we are not clobbering a callee-save register that was restored before. + DCHECK(!core_reg_list.IncludesAliasOf(temp.X())) + << "core_reg_list should not contain scratch register X" << temp.GetCode(); + asm_.GenerateMarkingRegisterCheck(temp); + } + } + } } // Decrease frame size to start of callee saved regs. 
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h index baf0434de0..fda87aa573 100644 --- a/compiler/utils/arm64/jni_macro_assembler_arm64.h +++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h @@ -40,9 +40,9 @@ namespace arm64 { class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler, PointerSize::k64> { public: - explicit Arm64JNIMacroAssembler(ArenaAllocator* arena) - : JNIMacroAssemblerFwd(arena), - exception_blocks_(arena->Adapter(kArenaAllocAssembler)) {} + explicit Arm64JNIMacroAssembler(ArenaAllocator* allocator) + : JNIMacroAssemblerFwd(allocator), + exception_blocks_(allocator->Adapter(kArenaAllocAssembler)) {} ~Arm64JNIMacroAssembler(); @@ -56,8 +56,9 @@ class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler, const ManagedRegisterEntrySpills& entry_spills) OVERRIDE; // Emit code that will remove an activation from the stack. - void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) - OVERRIDE; + void RemoveFrame(size_t frame_size, + ArrayRef<const ManagedRegister> callee_save_regs, + bool may_suspend) OVERRIDE; void IncreaseFrameSize(size_t adjust) OVERRIDE; void DecreaseFrameSize(size_t adjust) OVERRIDE; diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc index 25eca23af6..944c64b591 100644 --- a/compiler/utils/assembler.cc +++ b/compiler/utils/assembler.cc @@ -25,10 +25,10 @@ namespace art { -AssemblerBuffer::AssemblerBuffer(ArenaAllocator* arena) - : arena_(arena) { +AssemblerBuffer::AssemblerBuffer(ArenaAllocator* allocator) + : allocator_(allocator) { static const size_t kInitialBufferCapacity = 4 * KB; - contents_ = arena_->AllocArray<uint8_t>(kInitialBufferCapacity, kArenaAllocAssembler); + contents_ = allocator_->AllocArray<uint8_t>(kInitialBufferCapacity, kArenaAllocAssembler); cursor_ = contents_; limit_ = ComputeLimit(contents_, kInitialBufferCapacity); fixup_ = nullptr; @@ -45,8 
+45,8 @@ AssemblerBuffer::AssemblerBuffer(ArenaAllocator* arena) AssemblerBuffer::~AssemblerBuffer() { - if (arena_->IsRunningOnMemoryTool()) { - arena_->MakeInaccessible(contents_, Capacity()); + if (allocator_->IsRunningOnMemoryTool()) { + allocator_->MakeInaccessible(contents_, Capacity()); } } @@ -81,7 +81,7 @@ void AssemblerBuffer::ExtendCapacity(size_t min_capacity) { // Allocate the new data area and copy contents of the old one to it. contents_ = reinterpret_cast<uint8_t*>( - arena_->Realloc(contents_, old_capacity, new_capacity, kArenaAllocAssembler)); + allocator_->Realloc(contents_, old_capacity, new_capacity, kArenaAllocAssembler)); // Update the cursor and recompute the limit. cursor_ = contents_ + old_size; diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h index 314ff8cf7a..dbd35abfcf 100644 --- a/compiler/utils/assembler.h +++ b/compiler/utils/assembler.h @@ -89,11 +89,11 @@ class SlowPath : public DeletableArenaObject<kArenaAllocAssembler> { class AssemblerBuffer { public: - explicit AssemblerBuffer(ArenaAllocator* arena); + explicit AssemblerBuffer(ArenaAllocator* allocator); ~AssemblerBuffer(); - ArenaAllocator* GetArena() { - return arena_; + ArenaAllocator* GetAllocator() { + return allocator_; } // Basic support for emitting, loading, and storing. @@ -252,7 +252,7 @@ class AssemblerBuffer { // for a single, fast space check per instruction. 
static const int kMinimumGap = 32; - ArenaAllocator* arena_; + ArenaAllocator* allocator_; uint8_t* contents_; uint8_t* cursor_; uint8_t* limit_; @@ -392,8 +392,8 @@ class Assembler : public DeletableArenaObject<kArenaAllocAssembler> { */ DebugFrameOpCodeWriterForAssembler& cfi() { return cfi_; } - ArenaAllocator* GetArena() { - return buffer_.GetArena(); + ArenaAllocator* GetAllocator() { + return buffer_.GetAllocator(); } AssemblerBuffer* GetBuffer() { @@ -401,7 +401,7 @@ class Assembler : public DeletableArenaObject<kArenaAllocAssembler> { } protected: - explicit Assembler(ArenaAllocator* arena) : buffer_(arena), cfi_(this) {} + explicit Assembler(ArenaAllocator* allocator) : buffer_(allocator), cfi_(this) {} AssemblerBuffer buffer_; diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h index 227954e21b..11a9b91600 100644 --- a/compiler/utils/assembler_test.h +++ b/compiler/utils/assembler_test.h @@ -741,8 +741,8 @@ class AssemblerTest : public testing::Test { } // Override this to set up any architecture-specific things, e.g., CPU revision. - virtual Ass* CreateAssembler(ArenaAllocator* arena) { - return new (arena) Ass(arena); + virtual Ass* CreateAssembler(ArenaAllocator* allocator) { + return new (allocator) Ass(allocator); } // Override this to set up any architecture-specific things, e.g., register vectors. 
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc index 4dbe71b8c7..5622f89529 100644 --- a/compiler/utils/assembler_thumb_test.cc +++ b/compiler/utils/assembler_thumb_test.cc @@ -285,7 +285,7 @@ TEST_F(ArmVIXLAssemblerTest, VixlJniHelpers) { __ DecreaseFrameSize(4096); __ DecreaseFrameSize(32); - __ RemoveFrame(frame_size, callee_save_regs); + __ RemoveFrame(frame_size, callee_save_regs, /* may_suspend */ true); EmitAndCheck(&assembler, "VixlJniHelpers"); } diff --git a/compiler/utils/jni_macro_assembler.cc b/compiler/utils/jni_macro_assembler.cc index 3ac6c3ca7a..0616b35a39 100644 --- a/compiler/utils/jni_macro_assembler.cc +++ b/compiler/utils/jni_macro_assembler.cc @@ -47,7 +47,7 @@ using MacroAsm32UniquePtr = std::unique_ptr<JNIMacroAssembler<PointerSize::k32>> template <> MacroAsm32UniquePtr JNIMacroAssembler<PointerSize::k32>::Create( - ArenaAllocator* arena, + ArenaAllocator* allocator, InstructionSet instruction_set, const InstructionSetFeatures* instruction_set_features) { #ifndef ART_ENABLE_CODEGEN_mips @@ -58,19 +58,19 @@ MacroAsm32UniquePtr JNIMacroAssembler<PointerSize::k32>::Create( #ifdef ART_ENABLE_CODEGEN_arm case kArm: case kThumb2: - return MacroAsm32UniquePtr(new (arena) arm::ArmVIXLJNIMacroAssembler(arena)); + return MacroAsm32UniquePtr(new (allocator) arm::ArmVIXLJNIMacroAssembler(allocator)); #endif #ifdef ART_ENABLE_CODEGEN_mips case kMips: - return MacroAsm32UniquePtr(new (arena) mips::MipsAssembler( - arena, + return MacroAsm32UniquePtr(new (allocator) mips::MipsAssembler( + allocator, instruction_set_features != nullptr ? 
instruction_set_features->AsMipsInstructionSetFeatures() : nullptr)); #endif #ifdef ART_ENABLE_CODEGEN_x86 case kX86: - return MacroAsm32UniquePtr(new (arena) x86::X86JNIMacroAssembler(arena)); + return MacroAsm32UniquePtr(new (allocator) x86::X86JNIMacroAssembler(allocator)); #endif default: LOG(FATAL) << "Unknown/unsupported 4B InstructionSet: " << instruction_set; @@ -82,7 +82,7 @@ using MacroAsm64UniquePtr = std::unique_ptr<JNIMacroAssembler<PointerSize::k64>> template <> MacroAsm64UniquePtr JNIMacroAssembler<PointerSize::k64>::Create( - ArenaAllocator* arena, + ArenaAllocator* allocator, InstructionSet instruction_set, const InstructionSetFeatures* instruction_set_features) { #ifndef ART_ENABLE_CODEGEN_mips64 @@ -92,22 +92,22 @@ MacroAsm64UniquePtr JNIMacroAssembler<PointerSize::k64>::Create( switch (instruction_set) { #ifdef ART_ENABLE_CODEGEN_arm64 case kArm64: - return MacroAsm64UniquePtr(new (arena) arm64::Arm64JNIMacroAssembler(arena)); + return MacroAsm64UniquePtr(new (allocator) arm64::Arm64JNIMacroAssembler(allocator)); #endif #ifdef ART_ENABLE_CODEGEN_mips64 case kMips64: - return MacroAsm64UniquePtr(new (arena) mips64::Mips64Assembler( - arena, + return MacroAsm64UniquePtr(new (allocator) mips64::Mips64Assembler( + allocator, instruction_set_features != nullptr ? 
instruction_set_features->AsMips64InstructionSetFeatures() : nullptr)); #endif #ifdef ART_ENABLE_CODEGEN_x86_64 case kX86_64: - return MacroAsm64UniquePtr(new (arena) x86_64::X86_64JNIMacroAssembler(arena)); + return MacroAsm64UniquePtr(new (allocator) x86_64::X86_64JNIMacroAssembler(allocator)); #endif default: - UNUSED(arena); + UNUSED(allocator); LOG(FATAL) << "Unknown/unsupported 8B InstructionSet: " << instruction_set; UNREACHABLE(); } diff --git a/compiler/utils/jni_macro_assembler.h b/compiler/utils/jni_macro_assembler.h index a8ca1119e5..0fc1353bf5 100644 --- a/compiler/utils/jni_macro_assembler.h +++ b/compiler/utils/jni_macro_assembler.h @@ -46,7 +46,7 @@ template <PointerSize kPointerSize> class JNIMacroAssembler : public DeletableArenaObject<kArenaAllocAssembler> { public: static std::unique_ptr<JNIMacroAssembler<kPointerSize>> Create( - ArenaAllocator* arena, + ArenaAllocator* allocator, InstructionSet instruction_set, const InstructionSetFeatures* instruction_set_features = nullptr); @@ -66,7 +66,13 @@ class JNIMacroAssembler : public DeletableArenaObject<kArenaAllocAssembler> { const ManagedRegisterEntrySpills& entry_spills) = 0; // Emit code that will remove an activation from the stack - virtual void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) = 0; + // + // Argument `may_suspend` must be `true` if the compiled method may be + // suspended during its execution (otherwise `false`, if it is impossible + // to suspend during its execution). 
+ virtual void RemoveFrame(size_t frame_size, + ArrayRef<const ManagedRegister> callee_save_regs, + bool may_suspend) = 0; virtual void IncreaseFrameSize(size_t adjust) = 0; virtual void DecreaseFrameSize(size_t adjust) = 0; @@ -269,7 +275,7 @@ class JNIMacroAssemblerFwd : public JNIMacroAssembler<kPointerSize> { } protected: - explicit JNIMacroAssemblerFwd(ArenaAllocator* arena) : asm_(arena) {} + explicit JNIMacroAssemblerFwd(ArenaAllocator* allocator) : asm_(allocator) {} T asm_; }; diff --git a/compiler/utils/jni_macro_assembler_test.h b/compiler/utils/jni_macro_assembler_test.h index 61296802f8..ba95e212bb 100644 --- a/compiler/utils/jni_macro_assembler_test.h +++ b/compiler/utils/jni_macro_assembler_test.h @@ -80,8 +80,8 @@ class JNIMacroAssemblerTest : public testing::Test { } // Override this to set up any architecture-specific things, e.g., CPU revision. - virtual Ass* CreateAssembler(ArenaAllocator* arena) { - return new (arena) Ass(arena); + virtual Ass* CreateAssembler(ArenaAllocator* allocator) { + return new (allocator) Ass(allocator); } // Override this to set up any architecture-specific things, e.g., register vectors. 
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc index b300cc597f..b83e3f5471 100644 --- a/compiler/utils/mips/assembler_mips.cc +++ b/compiler/utils/mips/assembler_mips.cc @@ -5016,7 +5016,8 @@ void MipsAssembler::BuildFrame(size_t frame_size, } void MipsAssembler::RemoveFrame(size_t frame_size, - ArrayRef<const ManagedRegister> callee_save_regs) { + ArrayRef<const ManagedRegister> callee_save_regs, + bool may_suspend ATTRIBUTE_UNUSED) { CHECK_ALIGNED(frame_size, kStackAlignment); DCHECK(!overwriting_); cfi_.RememberState(); diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h index 0b4eb9ca55..57b3edd03a 100644 --- a/compiler/utils/mips/assembler_mips.h +++ b/compiler/utils/mips/assembler_mips.h @@ -192,16 +192,16 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi public: using JNIBase = JNIMacroAssembler<PointerSize::k32>; - explicit MipsAssembler(ArenaAllocator* arena, + explicit MipsAssembler(ArenaAllocator* allocator, const MipsInstructionSetFeatures* instruction_set_features = nullptr) - : Assembler(arena), + : Assembler(allocator), overwriting_(false), overwrite_location_(0), reordering_(true), ds_fsm_state_(kExpectingLabel), ds_fsm_target_pc_(0), - literals_(arena->Adapter(kArenaAllocAssembler)), - jump_tables_(arena->Adapter(kArenaAllocAssembler)), + literals_(allocator->Adapter(kArenaAllocAssembler)), + jump_tables_(allocator->Adapter(kArenaAllocAssembler)), last_position_adjustment_(0), last_old_position_(0), last_branch_id_(0), @@ -1090,8 +1090,9 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi const ManagedRegisterEntrySpills& entry_spills) OVERRIDE; // Emit code that will remove an activation from the stack. 
- void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) - OVERRIDE; + void RemoveFrame(size_t frame_size, + ArrayRef<const ManagedRegister> callee_save_regs, + bool may_suspend) OVERRIDE; void IncreaseFrameSize(size_t adjust) OVERRIDE; void DecreaseFrameSize(size_t adjust) OVERRIDE; diff --git a/compiler/utils/mips/assembler_mips32r5_test.cc b/compiler/utils/mips/assembler_mips32r5_test.cc index a3662db935..9a69ffd3dd 100644 --- a/compiler/utils/mips/assembler_mips32r5_test.cc +++ b/compiler/utils/mips/assembler_mips32r5_test.cc @@ -72,8 +72,8 @@ class AssemblerMIPS32r5Test : public AssemblerTest<mips::MipsAssembler, return " -D -bbinary -mmips:isa32r5"; } - mips::MipsAssembler* CreateAssembler(ArenaAllocator* arena) OVERRIDE { - return new (arena) mips::MipsAssembler(arena, instruction_set_features_.get()); + mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) OVERRIDE { + return new (allocator) mips::MipsAssembler(allocator, instruction_set_features_.get()); } void SetUpHelpers() OVERRIDE { diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc index b6cb30a6f0..b12b6b651c 100644 --- a/compiler/utils/mips/assembler_mips32r6_test.cc +++ b/compiler/utils/mips/assembler_mips32r6_test.cc @@ -85,8 +85,8 @@ class AssemblerMIPS32r6Test : public AssemblerTest<mips::MipsAssembler, return " -D -bbinary -mmips:isa32r6"; } - mips::MipsAssembler* CreateAssembler(ArenaAllocator* arena) OVERRIDE { - return new (arena) mips::MipsAssembler(arena, instruction_set_features_.get()); + mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) OVERRIDE { + return new (allocator) mips::MipsAssembler(allocator, instruction_set_features_.get()); } void SetUpHelpers() OVERRIDE { diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc index 183b5e507b..606d4c39d0 100644 --- a/compiler/utils/mips64/assembler_mips64.cc +++ 
b/compiler/utils/mips64/assembler_mips64.cc @@ -3406,7 +3406,8 @@ void Mips64Assembler::BuildFrame(size_t frame_size, } void Mips64Assembler::RemoveFrame(size_t frame_size, - ArrayRef<const ManagedRegister> callee_save_regs) { + ArrayRef<const ManagedRegister> callee_save_regs, + bool may_suspend ATTRIBUTE_UNUSED) { CHECK_ALIGNED(frame_size, kStackAlignment); DCHECK(!overwriting_); cfi_.RememberState(); diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h index bb54382811..a3787ac6ae 100644 --- a/compiler/utils/mips64/assembler_mips64.h +++ b/compiler/utils/mips64/assembler_mips64.h @@ -418,14 +418,14 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer public: using JNIBase = JNIMacroAssembler<PointerSize::k64>; - explicit Mips64Assembler(ArenaAllocator* arena, + explicit Mips64Assembler(ArenaAllocator* allocator, const Mips64InstructionSetFeatures* instruction_set_features = nullptr) - : Assembler(arena), + : Assembler(allocator), overwriting_(false), overwrite_location_(0), - literals_(arena->Adapter(kArenaAllocAssembler)), - long_literals_(arena->Adapter(kArenaAllocAssembler)), - jump_tables_(arena->Adapter(kArenaAllocAssembler)), + literals_(allocator->Adapter(kArenaAllocAssembler)), + long_literals_(allocator->Adapter(kArenaAllocAssembler)), + jump_tables_(allocator->Adapter(kArenaAllocAssembler)), last_position_adjustment_(0), last_old_position_(0), last_branch_id_(0), @@ -1278,7 +1278,9 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer const ManagedRegisterEntrySpills& entry_spills) OVERRIDE; // Emit code that will remove an activation from the stack. 
- void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) OVERRIDE; + void RemoveFrame(size_t frame_size, + ArrayRef<const ManagedRegister> callee_save_regs, + bool may_suspend) OVERRIDE; void IncreaseFrameSize(size_t adjust) OVERRIDE; void DecreaseFrameSize(size_t adjust) OVERRIDE; diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc index 16a36f9069..bf0326de87 100644 --- a/compiler/utils/mips64/assembler_mips64_test.cc +++ b/compiler/utils/mips64/assembler_mips64_test.cc @@ -83,8 +83,8 @@ class AssemblerMIPS64Test : public AssemblerTest<mips64::Mips64Assembler, return " -D -bbinary -mmips:isa64r6"; } - mips64::Mips64Assembler* CreateAssembler(ArenaAllocator* arena) OVERRIDE { - return new (arena) mips64::Mips64Assembler(arena, instruction_set_features_.get()); + mips64::Mips64Assembler* CreateAssembler(ArenaAllocator* allocator) OVERRIDE { + return new (allocator) mips64::Mips64Assembler(allocator, instruction_set_features_.get()); } void SetUpHelpers() OVERRIDE { diff --git a/compiler/utils/test_dex_file_builder.h b/compiler/utils/test_dex_file_builder.h index 9ba3903033..e6501e0b83 100644 --- a/compiler/utils/test_dex_file_builder.h +++ b/compiler/utils/test_dex_file_builder.h @@ -26,7 +26,8 @@ #include "base/bit_utils.h" #include "base/logging.h" -#include "dex_file.h" +#include "dex_file_loader.h" +#include "native_dex_file.h" namespace art { @@ -88,8 +89,8 @@ class TestDexFileBuilder { } header_data; std::memset(header_data.data, 0, sizeof(header_data.data)); DexFile::Header* header = reinterpret_cast<DexFile::Header*>(&header_data.data); - std::copy_n(DexFile::kDexMagic, 4u, header->magic_); - std::copy_n(DexFile::kDexMagicVersions[0], 4u, header->magic_ + 4u); + std::copy_n(NativeDexFile::kDexMagic, 4u, header->magic_); + std::copy_n(NativeDexFile::kDexMagicVersions[0], 4u, header->magic_ + 4u); header->header_size_ = sizeof(DexFile::Header); header->endian_tag_ = 
DexFile::kDexEndianConstant; header->link_size_ = 0u; // Unused. @@ -231,7 +232,7 @@ class TestDexFileBuilder { static constexpr bool kVerify = false; static constexpr bool kVerifyChecksum = false; std::string error_msg; - std::unique_ptr<const DexFile> dex_file(DexFile::Open( + std::unique_ptr<const DexFile> dex_file(DexFileLoader::Open( &dex_file_data_[0], dex_file_data_.size(), dex_location, diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h index dce3ad228c..f3b516cb7e 100644 --- a/compiler/utils/x86/assembler_x86.h +++ b/compiler/utils/x86/assembler_x86.h @@ -266,7 +266,8 @@ class NearLabel : private Label { */ class ConstantArea { public: - explicit ConstantArea(ArenaAllocator* arena) : buffer_(arena->Adapter(kArenaAllocAssembler)) {} + explicit ConstantArea(ArenaAllocator* allocator) + : buffer_(allocator->Adapter(kArenaAllocAssembler)) {} // Add a double to the constant area, returning the offset into // the constant area where the literal resides. @@ -307,7 +308,8 @@ class ConstantArea { class X86Assembler FINAL : public Assembler { public: - explicit X86Assembler(ArenaAllocator* arena) : Assembler(arena), constant_area_(arena) {} + explicit X86Assembler(ArenaAllocator* allocator) + : Assembler(allocator), constant_area_(allocator) {} virtual ~X86Assembler() {} /* diff --git a/compiler/utils/x86/jni_macro_assembler_x86.cc b/compiler/utils/x86/jni_macro_assembler_x86.cc index e074346e01..7e29c4aa26 100644 --- a/compiler/utils/x86/jni_macro_assembler_x86.cc +++ b/compiler/utils/x86/jni_macro_assembler_x86.cc @@ -85,7 +85,8 @@ void X86JNIMacroAssembler::BuildFrame(size_t frame_size, } void X86JNIMacroAssembler::RemoveFrame(size_t frame_size, - ArrayRef<const ManagedRegister> spill_regs) { + ArrayRef<const ManagedRegister> spill_regs, + bool may_suspend ATTRIBUTE_UNUSED) { CHECK_ALIGNED(frame_size, kStackAlignment); cfi().RememberState(); // -kFramePointerSize for ArtMethod*. 
@@ -517,7 +518,7 @@ void X86JNIMacroAssembler::GetCurrentThread(FrameOffset offset, } void X86JNIMacroAssembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) { - X86ExceptionSlowPath* slow = new (__ GetArena()) X86ExceptionSlowPath(stack_adjust); + X86ExceptionSlowPath* slow = new (__ GetAllocator()) X86ExceptionSlowPath(stack_adjust); __ GetBuffer()->EnqueueSlowPath(slow); __ fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()), Immediate(0)); __ j(kNotEqual, slow->Entry()); diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h index 8ffda6425e..56eaf1951e 100644 --- a/compiler/utils/x86/jni_macro_assembler_x86.h +++ b/compiler/utils/x86/jni_macro_assembler_x86.h @@ -34,7 +34,7 @@ class X86JNIMacroLabel; class X86JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86Assembler, PointerSize::k32> { public: - explicit X86JNIMacroAssembler(ArenaAllocator* arena) : JNIMacroAssemblerFwd(arena) {} + explicit X86JNIMacroAssembler(ArenaAllocator* allocator) : JNIMacroAssemblerFwd(allocator) {} virtual ~X86JNIMacroAssembler() {} // @@ -48,8 +48,9 @@ class X86JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86Assembler, Poi const ManagedRegisterEntrySpills& entry_spills) OVERRIDE; // Emit code that will remove an activation from the stack - void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) - OVERRIDE; + void RemoveFrame(size_t frame_size, + ArrayRef<const ManagedRegister> callee_save_regs, + bool may_suspend) OVERRIDE; void IncreaseFrameSize(size_t adjust) OVERRIDE; void DecreaseFrameSize(size_t adjust) OVERRIDE; diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h index 11304443e0..0d24a751c0 100644 --- a/compiler/utils/x86_64/assembler_x86_64.h +++ b/compiler/utils/x86_64/assembler_x86_64.h @@ -290,7 +290,8 @@ std::ostream& operator<<(std::ostream& os, const Address& addr); */ class ConstantArea { 
public: - explicit ConstantArea(ArenaAllocator* arena) : buffer_(arena->Adapter(kArenaAllocAssembler)) {} + explicit ConstantArea(ArenaAllocator* allocator) + : buffer_(allocator->Adapter(kArenaAllocAssembler)) {} // Add a double to the constant area, returning the offset into // the constant area where the literal resides. @@ -352,7 +353,8 @@ class NearLabel : private Label { class X86_64Assembler FINAL : public Assembler { public: - explicit X86_64Assembler(ArenaAllocator* arena) : Assembler(arena), constant_area_(arena) {} + explicit X86_64Assembler(ArenaAllocator* allocator) + : Assembler(allocator), constant_area_(allocator) {} virtual ~X86_64Assembler() {} /* diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc index aff8871025..b08ba4a03a 100644 --- a/compiler/utils/x86_64/assembler_x86_64_test.cc +++ b/compiler/utils/x86_64/assembler_x86_64_test.cc @@ -2043,7 +2043,7 @@ std::string removeframe_test_fn(JNIMacroAssemblerX86_64Test::Base* assembler_tes ArrayRef<const ManagedRegister> spill_regs(raw_spill_regs); size_t frame_size = 10 * kStackAlignment; - assembler->RemoveFrame(frame_size, spill_regs); + assembler->RemoveFrame(frame_size, spill_regs, /* may_suspend */ true); // Construct assembly text counterpart. 
std::ostringstream str; diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc index ec86254cfc..5766f9d44b 100644 --- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc +++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc @@ -100,7 +100,8 @@ void X86_64JNIMacroAssembler::BuildFrame(size_t frame_size, } void X86_64JNIMacroAssembler::RemoveFrame(size_t frame_size, - ArrayRef<const ManagedRegister> spill_regs) { + ArrayRef<const ManagedRegister> spill_regs, + bool may_suspend ATTRIBUTE_UNUSED) { CHECK_ALIGNED(frame_size, kStackAlignment); cfi().RememberState(); int gpr_count = 0; @@ -583,9 +584,10 @@ class X86_64ExceptionSlowPath FINAL : public SlowPath { }; void X86_64JNIMacroAssembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) { - X86_64ExceptionSlowPath* slow = new (__ GetArena()) X86_64ExceptionSlowPath(stack_adjust); + X86_64ExceptionSlowPath* slow = new (__ GetAllocator()) X86_64ExceptionSlowPath(stack_adjust); __ GetBuffer()->EnqueueSlowPath(slow); - __ gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true), Immediate(0)); + __ gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true), + Immediate(0)); __ j(kNotEqual, slow->Entry()); } diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h index aa058f7454..d1a3032a56 100644 --- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h +++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h @@ -34,8 +34,8 @@ namespace x86_64 { class X86_64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86_64Assembler, PointerSize::k64> { public: - explicit X86_64JNIMacroAssembler(ArenaAllocator* arena) - : JNIMacroAssemblerFwd<X86_64Assembler, PointerSize::k64>(arena) {} + explicit X86_64JNIMacroAssembler(ArenaAllocator* allocator) + : JNIMacroAssemblerFwd<X86_64Assembler, PointerSize::k64>(allocator) {} virtual 
~X86_64JNIMacroAssembler() {} // @@ -49,8 +49,9 @@ class X86_64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86_64Assemble const ManagedRegisterEntrySpills& entry_spills) OVERRIDE; // Emit code that will remove an activation from the stack - void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) - OVERRIDE; + void RemoveFrame(size_t frame_size, + ArrayRef<const ManagedRegister> callee_save_regs, + bool may_suspend) OVERRIDE; void IncreaseFrameSize(size_t adjust) OVERRIDE; void DecreaseFrameSize(size_t adjust) OVERRIDE; diff --git a/dex2oat/Android.bp b/dex2oat/Android.bp index c9125df8f4..a93b0e7f0c 100644 --- a/dex2oat/Android.bp +++ b/dex2oat/Android.bp @@ -91,7 +91,10 @@ cc_defaults { name: "dex2oat-defaults", host_supported: true, defaults: ["art_defaults"], - srcs: ["dex2oat.cc"], + srcs: [ + "dex2oat_options.cc", + "dex2oat.cc", + ], target: { android: { diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index 7b4653107f..528cf3a0a7 100644 --- a/dex2oat/dex2oat.cc +++ b/dex2oat/dex2oat.cc @@ -20,11 +20,13 @@ #include <sys/stat.h> #include "base/memory_tool.h" +#include <forward_list> #include <fstream> #include <iostream> #include <limits> #include <sstream> #include <string> +#include <type_traits> #include <unordered_set> #include <vector> @@ -50,16 +52,19 @@ #include "base/unix_file/fd_file.h" #include "class_linker.h" #include "class_loader_context.h" +#include "cmdline_parser.h" #include "compiler.h" #include "compiler_callbacks.h" #include "debug/elf_debug_writer.h" #include "debug/method_debug_info.h" #include "dex/quick_compiler_callbacks.h" #include "dex/verification_results.h" +#include "dex2oat_options.h" #include "dex2oat_return_codes.h" #include "dex_file-inl.h" #include "driver/compiler_driver.h" #include "driver/compiler_options.h" +#include "driver/compiler_options_map-inl.h" #include "elf_file.h" #include "gc/space/image_space.h" #include "gc/space/space-inl.h" @@ -235,6 +240,13 @@ NO_RETURN static 
void Usage(const char* fmt, ...) { UsageError(" --oat-fd=<number>: specifies the oat output destination via a file descriptor."); UsageError(" Example: --oat-fd=6"); UsageError(""); + UsageError(" --input-vdex-fd=<number>: specifies the vdex input source via a file descriptor."); + UsageError(" Example: --input-vdex-fd=6"); + UsageError(""); + UsageError(" --output-vdex-fd=<number>: specifies the vdex output destination via a file"); + UsageError(" descriptor."); + UsageError(" Example: --output-vdex-fd=6"); + UsageError(""); UsageError(" --oat-location=<oat-name>: specifies a symbolic name for the file corresponding"); UsageError(" to the file descriptor specified by --oat-fd."); UsageError(" Example: --oat-location=/data/dalvik-cache/system@app@Calculator.apk.oat"); @@ -659,76 +671,27 @@ class Dex2Oat FINAL { std::string error_msg; }; - void ParseZipFd(const StringPiece& option) { - ParseUintOption(option, "--zip-fd", &zip_fd_, Usage); - } - - void ParseInputVdexFd(const StringPiece& option) { - // Note that the input vdex fd might be -1. 
- ParseIntOption(option, "--input-vdex-fd", &input_vdex_fd_, Usage); - } - - void ParseOutputVdexFd(const StringPiece& option) { - ParseUintOption(option, "--output-vdex-fd", &output_vdex_fd_, Usage); - } - - void ParseOatFd(const StringPiece& option) { - ParseUintOption(option, "--oat-fd", &oat_fd_, Usage); - } - - void ParseFdForCollection(const StringPiece& option, - const char* arg_name, - std::vector<uint32_t>* fds) { - uint32_t fd; - ParseUintOption(option, arg_name, &fd, Usage); - fds->push_back(fd); - } - - void ParseJ(const StringPiece& option) { - ParseUintOption(option, "-j", &thread_count_, Usage, /* is_long_option */ false); - } - - void ParseBase(const StringPiece& option) { - DCHECK(option.starts_with("--base=")); - const char* image_base_str = option.substr(strlen("--base=")).data(); + void ParseBase(const std::string& option) { char* end; - image_base_ = strtoul(image_base_str, &end, 16); - if (end == image_base_str || *end != '\0') { + image_base_ = strtoul(option.c_str(), &end, 16); + if (end == option.c_str() || *end != '\0') { Usage("Failed to parse hexadecimal value for option %s", option.data()); } } - void ParseInstructionSet(const StringPiece& option) { - DCHECK(option.starts_with("--instruction-set=")); - StringPiece instruction_set_str = option.substr(strlen("--instruction-set=")).data(); - // StringPiece is not necessarily zero-terminated, so need to make a copy and ensure it. - std::unique_ptr<char[]> buf(new char[instruction_set_str.length() + 1]); - strncpy(buf.get(), instruction_set_str.data(), instruction_set_str.length()); - buf.get()[instruction_set_str.length()] = 0; - instruction_set_ = GetInstructionSetFromString(buf.get()); - // arm actually means thumb2. 
- if (instruction_set_ == InstructionSet::kArm) { - instruction_set_ = InstructionSet::kThumb2; - } - } - bool VerifyProfileData() { return profile_compilation_info_->VerifyProfileData(dex_files_); } - void ParseInstructionSetVariant(const StringPiece& option, ParserOptions* parser_options) { - DCHECK(option.starts_with("--instruction-set-variant=")); - StringPiece str = option.substr(strlen("--instruction-set-variant=")).data(); + void ParseInstructionSetVariant(const std::string& option, ParserOptions* parser_options) { instruction_set_features_ = InstructionSetFeatures::FromVariant( - instruction_set_, str.as_string(), &parser_options->error_msg); + instruction_set_, option, &parser_options->error_msg); if (instruction_set_features_.get() == nullptr) { Usage("%s", parser_options->error_msg.c_str()); } } - void ParseInstructionSetFeatures(const StringPiece& option, ParserOptions* parser_options) { - DCHECK(option.starts_with("--instruction-set-features=")); - StringPiece str = option.substr(strlen("--instruction-set-features=")).data(); + void ParseInstructionSetFeatures(const std::string& option, ParserOptions* parser_options) { if (instruction_set_features_ == nullptr) { instruction_set_features_ = InstructionSetFeatures::FromVariant( instruction_set_, "default", &parser_options->error_msg); @@ -738,38 +701,9 @@ class Dex2Oat FINAL { } } instruction_set_features_ = - instruction_set_features_->AddFeaturesFromString(str.as_string(), - &parser_options->error_msg); + instruction_set_features_->AddFeaturesFromString(option, &parser_options->error_msg); if (instruction_set_features_ == nullptr) { - Usage("Error parsing '%s': %s", option.data(), parser_options->error_msg.c_str()); - } - } - - void ParseCompilerBackend(const StringPiece& option, ParserOptions* parser_options) { - DCHECK(option.starts_with("--compiler-backend=")); - parser_options->requested_specific_compiler = true; - StringPiece backend_str = option.substr(strlen("--compiler-backend=")).data(); - if 
(backend_str == "Quick") { - compiler_kind_ = Compiler::kQuick; - } else if (backend_str == "Optimizing") { - compiler_kind_ = Compiler::kOptimizing; - } else { - Usage("Unknown compiler backend: %s", backend_str.data()); - } - } - - void ParseImageFormat(const StringPiece& option) { - const StringPiece substr("--image-format="); - DCHECK(option.starts_with(substr)); - const StringPiece format_str = option.substr(substr.length()); - if (format_str == "lz4") { - image_storage_mode_ = ImageHeader::kStorageModeLZ4; - } else if (format_str == "lz4hc") { - image_storage_mode_ = ImageHeader::kStorageModeLZ4HC; - } else if (format_str == "uncompressed") { - image_storage_mode_ = ImageHeader::kStorageModeUncompressed; - } else { - Usage("Unknown image format: %s", format_str.data()); + Usage("Error parsing '%s': %s", option.c_str(), parser_options->error_msg.c_str()); } } @@ -1092,23 +1026,20 @@ class Dex2Oat FINAL { base_symbol_oat = base_symbol_oat.substr(0, last_symbol_oat_slash + 1); } - const size_t num_expanded_files = 2 + (base_symbol_oat.empty() ? 0 : 1); - char_backing_storage_.reserve((dex_locations_.size() - 1) * num_expanded_files); - // Now create the other names. Use a counted loop to skip the first one. for (size_t i = 1; i < dex_locations_.size(); ++i) { // TODO: Make everything properly std::string. 
std::string image_name = CreateMultiImageName(dex_locations_[i], prefix, infix, ".art"); - char_backing_storage_.push_back(base_img + image_name); - image_filenames_.push_back((char_backing_storage_.end() - 1)->c_str()); + char_backing_storage_.push_front(base_img + image_name); + image_filenames_.push_back(char_backing_storage_.front().c_str()); std::string oat_name = CreateMultiImageName(dex_locations_[i], prefix, infix, ".oat"); - char_backing_storage_.push_back(base_oat + oat_name); - oat_filenames_.push_back((char_backing_storage_.end() - 1)->c_str()); + char_backing_storage_.push_front(base_oat + oat_name); + oat_filenames_.push_back(char_backing_storage_.front().c_str()); if (!base_symbol_oat.empty()) { - char_backing_storage_.push_back(base_symbol_oat + oat_name); - oat_unstripped_.push_back((char_backing_storage_.end() - 1)->c_str()); + char_backing_storage_.push_front(base_symbol_oat + oat_name); + oat_unstripped_.push_back(char_backing_storage_.front().c_str()); } } } @@ -1173,6 +1104,43 @@ class Dex2Oat FINAL { kUseReadBarrier ? OatHeader::kTrueValue : OatHeader::kFalseValue); } + // This simple forward is here so the string specializations below don't look out of place. + template <typename T, typename U> + void AssignIfExists(Dex2oatArgumentMap& map, + const Dex2oatArgumentMap::Key<T>& key, + U* out) { + map.AssignIfExists(key, out); + } + + // Specializations to handle const char* vs std::string. 
+ void AssignIfExists(Dex2oatArgumentMap& map, + const Dex2oatArgumentMap::Key<std::string>& key, + const char** out) { + if (map.Exists(key)) { + char_backing_storage_.push_front(std::move(*map.Get(key))); + *out = char_backing_storage_.front().c_str(); + } + } + void AssignIfExists(Dex2oatArgumentMap& map, + const Dex2oatArgumentMap::Key<std::vector<std::string>>& key, + std::vector<const char*>* out) { + if (map.Exists(key)) { + for (auto& val : *map.Get(key)) { + char_backing_storage_.push_front(std::move(val)); + out->push_back(char_backing_storage_.front().c_str()); + } + } + } + + template <typename T> + void AssignTrueIfExists(Dex2oatArgumentMap& map, + const Dex2oatArgumentMap::Key<T>& key, + bool* out) { + if (map.Exists(key)) { + *out = true; + } + } + // Parse the arguments from the command line. In case of an unrecognized option or impossible // values/combinations, a usage error will be displayed and exit() is called. Thus, if the method // returns, arguments have been successfully parsed. @@ -1182,159 +1150,104 @@ class Dex2Oat FINAL { InitLogging(argv, Runtime::Abort); - // Skip over argv[0]. 
- argv++; - argc--; + compiler_options_.reset(new CompilerOptions()); - if (argc == 0) { - Usage("No arguments specified"); + using M = Dex2oatArgumentMap; + std::string error_msg; + std::unique_ptr<M> args_uptr = M::Parse(argc, const_cast<const char**>(argv), &error_msg); + if (args_uptr == nullptr) { + Usage("Failed to parse command line: %s", error_msg.c_str()); + UNREACHABLE(); } + M& args = *args_uptr; + std::unique_ptr<ParserOptions> parser_options(new ParserOptions()); - compiler_options_.reset(new CompilerOptions()); - for (int i = 0; i < argc; i++) { - const StringPiece option(argv[i]); - const bool log_options = false; - if (log_options) { - LOG(INFO) << "dex2oat: option[" << i << "]=" << argv[i]; - } - if (option.starts_with("--dex-file=")) { - dex_filenames_.push_back(option.substr(strlen("--dex-file=")).data()); - } else if (option.starts_with("--dex-location=")) { - dex_locations_.push_back(option.substr(strlen("--dex-location=")).data()); - } else if (option.starts_with("--zip-fd=")) { - ParseZipFd(option); - } else if (option.starts_with("--zip-location=")) { - zip_location_ = option.substr(strlen("--zip-location=")).data(); - } else if (option.starts_with("--input-vdex-fd=")) { - ParseInputVdexFd(option); - } else if (option.starts_with("--input-vdex=")) { - input_vdex_ = option.substr(strlen("--input-vdex=")).data(); - } else if (option.starts_with("--output-vdex=")) { - output_vdex_ = option.substr(strlen("--output-vdex=")).data(); - } else if (option.starts_with("--output-vdex-fd=")) { - ParseOutputVdexFd(option); - } else if (option.starts_with("--oat-file=")) { - oat_filenames_.push_back(option.substr(strlen("--oat-file=")).data()); - } else if (option.starts_with("--oat-symbols=")) { - parser_options->oat_symbols.push_back(option.substr(strlen("--oat-symbols=")).data()); - } else if (option.starts_with("--oat-fd=")) { - ParseOatFd(option); - } else if (option.starts_with("--oat-location=")) { - oat_location_ = 
option.substr(strlen("--oat-location=")).data(); - } else if (option == "--watch-dog") { - parser_options->watch_dog_enabled = true; - } else if (option == "--no-watch-dog") { - parser_options->watch_dog_enabled = false; - } else if (option.starts_with("--watchdog-timeout=")) { - ParseIntOption(option, - "--watchdog-timeout", - &parser_options->watch_dog_timeout_in_ms, - Usage); - } else if (option.starts_with("-j")) { - ParseJ(option); - } else if (option.starts_with("--image=")) { - image_filenames_.push_back(option.substr(strlen("--image=")).data()); - } else if (option.starts_with("--image-classes=")) { - image_classes_filename_ = option.substr(strlen("--image-classes=")).data(); - } else if (option.starts_with("--image-classes-zip=")) { - image_classes_zip_filename_ = option.substr(strlen("--image-classes-zip=")).data(); - } else if (option.starts_with("--image-format=")) { - ParseImageFormat(option); - } else if (option.starts_with("--compiled-classes=")) { - compiled_classes_filename_ = option.substr(strlen("--compiled-classes=")).data(); - } else if (option.starts_with("--compiled-classes-zip=")) { - compiled_classes_zip_filename_ = option.substr(strlen("--compiled-classes-zip=")).data(); - } else if (option.starts_with("--compiled-methods=")) { - compiled_methods_filename_ = option.substr(strlen("--compiled-methods=")).data(); - } else if (option.starts_with("--compiled-methods-zip=")) { - compiled_methods_zip_filename_ = option.substr(strlen("--compiled-methods-zip=")).data(); - } else if (option.starts_with("--run-passes=")) { - passes_to_run_filename_ = option.substr(strlen("--run-passes=")).data(); - } else if (option.starts_with("--base=")) { - ParseBase(option); - } else if (option.starts_with("--boot-image=")) { - parser_options->boot_image_filename = option.substr(strlen("--boot-image=")).data(); - } else if (option.starts_with("--android-root=")) { - android_root_ = option.substr(strlen("--android-root=")).data(); - } else if 
(option.starts_with("--instruction-set=")) { - ParseInstructionSet(option); - } else if (option.starts_with("--instruction-set-variant=")) { - ParseInstructionSetVariant(option, parser_options.get()); - } else if (option.starts_with("--instruction-set-features=")) { - ParseInstructionSetFeatures(option, parser_options.get()); - } else if (option.starts_with("--compiler-backend=")) { - ParseCompilerBackend(option, parser_options.get()); - } else if (option.starts_with("--profile-file=")) { - profile_file_ = option.substr(strlen("--profile-file=")).ToString(); - } else if (option.starts_with("--profile-file-fd=")) { - ParseUintOption(option, "--profile-file-fd", &profile_file_fd_, Usage); - } else if (option == "--host") { - is_host_ = true; - } else if (option == "--runtime-arg") { - if (++i >= argc) { - Usage("Missing required argument for --runtime-arg"); - } - if (log_options) { - LOG(INFO) << "dex2oat: option[" << i << "]=" << argv[i]; - } - runtime_args_.push_back(argv[i]); - } else if (option == "--dump-timing") { - dump_timing_ = true; - } else if (option == "--dump-passes") { - dump_passes_ = true; - } else if (option == "--dump-stats") { - dump_stats_ = true; - } else if (option == "--avoid-storing-invocation") { - avoid_storing_invocation_ = true; - } else if (option.starts_with("--swap-file=")) { - swap_file_name_ = option.substr(strlen("--swap-file=")).data(); - } else if (option.starts_with("--swap-fd=")) { - ParseUintOption(option, "--swap-fd", &swap_fd_, Usage); - } else if (option.starts_with("--swap-dex-size-threshold=")) { - ParseUintOption(option, - "--swap-dex-size-threshold", - &min_dex_file_cumulative_size_for_swap_, - Usage); - } else if (option.starts_with("--swap-dex-count-threshold=")) { - ParseUintOption(option, - "--swap-dex-count-threshold", - &min_dex_files_for_swap_, - Usage); - } else if (option.starts_with("--very-large-app-threshold=")) { - ParseUintOption(option, - "--very-large-app-threshold", - &very_large_threshold_, - Usage); - 
} else if (option.starts_with("--app-image-file=")) { - app_image_file_name_ = option.substr(strlen("--app-image-file=")).data(); - } else if (option.starts_with("--app-image-fd=")) { - ParseUintOption(option, "--app-image-fd", &app_image_fd_, Usage); - } else if (option == "--multi-image") { - multi_image_ = true; - } else if (option.starts_with("--no-inline-from=")) { - no_inline_from_string_ = option.substr(strlen("--no-inline-from=")).data(); - } else if (option == "--force-determinism") { - if (!SupportsDeterministicCompilation()) { - Usage("Option --force-determinism requires read barriers or a CMS/MS garbage collector"); - } - force_determinism_ = true; - } else if (option.starts_with("--classpath-dir=")) { - classpath_dir_ = option.substr(strlen("--classpath-dir=")).data(); - } else if (option.starts_with("--class-loader-context=")) { - class_loader_context_ = ClassLoaderContext::Create( - option.substr(strlen("--class-loader-context=")).data()); - if (class_loader_context_ == nullptr) { - Usage("Option --class-loader-context has an incorrect format: %s", option.data()); - } - } else if (option.starts_with("--dirty-image-objects=")) { - dirty_image_objects_filename_ = option.substr(strlen("--dirty-image-objects=")).data(); - } else if (!compiler_options_->ParseCompilerOption(option, Usage)) { - Usage("Unknown argument %s", option.data()); + AssignIfExists(args, M::DexFiles, &dex_filenames_); + AssignIfExists(args, M::DexLocations, &dex_locations_); + AssignIfExists(args, M::OatFiles, &oat_filenames_); + AssignIfExists(args, M::OatSymbols, &parser_options->oat_symbols); + AssignIfExists(args, M::ImageFilenames, &image_filenames_); + AssignIfExists(args, M::ZipFd, &zip_fd_); + AssignIfExists(args, M::ZipLocation, &zip_location_); + AssignIfExists(args, M::InputVdexFd, &input_vdex_fd_); + AssignIfExists(args, M::OutputVdexFd, &output_vdex_fd_); + AssignIfExists(args, M::InputVdex, &input_vdex_); + AssignIfExists(args, M::OutputVdex, &output_vdex_); + 
AssignIfExists(args, M::OatFd, &oat_fd_); + AssignIfExists(args, M::OatLocation, &oat_location_); + AssignIfExists(args, M::Watchdog, &parser_options->watch_dog_enabled); + AssignIfExists(args, M::WatchdogTimeout, &parser_options->watch_dog_timeout_in_ms); + AssignIfExists(args, M::Threads, &thread_count_); + AssignIfExists(args, M::ImageClasses, &image_classes_filename_); + AssignIfExists(args, M::ImageClassesZip, &image_classes_zip_filename_); + AssignIfExists(args, M::CompiledClasses, &compiled_classes_filename_); + AssignIfExists(args, M::CompiledClassesZip, &compiled_classes_zip_filename_); + AssignIfExists(args, M::CompiledMethods, &compiled_methods_filename_); + AssignIfExists(args, M::CompiledMethodsZip, &compiled_methods_zip_filename_); + AssignIfExists(args, M::Passes, &passes_to_run_filename_); + AssignIfExists(args, M::BootImage, &parser_options->boot_image_filename); + AssignIfExists(args, M::AndroidRoot, &android_root_); + AssignIfExists(args, M::Profile, &profile_file_); + AssignIfExists(args, M::ProfileFd, &profile_file_fd_); + AssignIfExists(args, M::RuntimeOptions, &runtime_args_); + AssignIfExists(args, M::SwapFile, &swap_file_name_); + AssignIfExists(args, M::SwapFileFd, &swap_fd_); + AssignIfExists(args, M::SwapDexSizeThreshold, &min_dex_file_cumulative_size_for_swap_); + AssignIfExists(args, M::SwapDexCountThreshold, &min_dex_files_for_swap_); + AssignIfExists(args, M::VeryLargeAppThreshold, &very_large_threshold_); + AssignIfExists(args, M::AppImageFile, &app_image_file_name_); + AssignIfExists(args, M::AppImageFileFd, &app_image_fd_); + AssignIfExists(args, M::NoInlineFrom, &no_inline_from_string_); + AssignIfExists(args, M::ClasspathDir, &classpath_dir_); + AssignIfExists(args, M::DirtyImageObjects, &dirty_image_objects_filename_); + AssignIfExists(args, M::ImageFormat, &image_storage_mode_); + + AssignIfExists(args, M::Backend, &compiler_kind_); + parser_options->requested_specific_compiler = args.Exists(M::Backend); + + 
AssignIfExists(args, M::TargetInstructionSet, &instruction_set_); + // arm actually means thumb2. + if (instruction_set_ == InstructionSet::kArm) { + instruction_set_ = InstructionSet::kThumb2; + } + + AssignTrueIfExists(args, M::Host, &is_host_); + AssignTrueIfExists(args, M::DumpTiming, &dump_timing_); + AssignTrueIfExists(args, M::DumpPasses, &dump_passes_); + AssignTrueIfExists(args, M::DumpStats, &dump_stats_); + AssignTrueIfExists(args, M::AvoidStoringInvocation, &avoid_storing_invocation_); + AssignTrueIfExists(args, M::MultiImage, &multi_image_); + + if (args.Exists(M::ForceDeterminism)) { + if (!SupportsDeterministicCompilation()) { + Usage("Option --force-determinism requires read barriers or a CMS/MS garbage collector"); + } + force_determinism_ = true; + } + + if (args.Exists(M::Base)) { + ParseBase(*args.Get(M::Base)); + } + if (args.Exists(M::TargetInstructionSetVariant)) { + ParseInstructionSetVariant(*args.Get(M::TargetInstructionSetVariant), parser_options.get()); + } + if (args.Exists(M::TargetInstructionSetFeatures)) { + ParseInstructionSetFeatures(*args.Get(M::TargetInstructionSetFeatures), parser_options.get()); + } + if (args.Exists(M::ClassLoaderContext)) { + class_loader_context_ = ClassLoaderContext::Create(*args.Get(M::ClassLoaderContext)); + if (class_loader_context_ == nullptr) { + Usage("Option --class-loader-context has an incorrect format: %s", + args.Get(M::ClassLoaderContext)->c_str()); } } + if (!ReadCompilerOptions(args, compiler_options_.get(), &error_msg)) { + Usage(error_msg.c_str()); + } + ProcessOptions(parser_options.get()); // Insert some compiler things. @@ -2931,7 +2844,7 @@ class Dex2Oat FINAL { std::unordered_map<const DexFile*, size_t> dex_file_oat_index_map_; // Backing storage. - std::vector<std::string> char_backing_storage_; + std::forward_list<std::string> char_backing_storage_; // See CompilerOptions.force_determinism_. 
bool force_determinism_; diff --git a/dex2oat/dex2oat_image_test.cc b/dex2oat/dex2oat_image_test.cc index 1f644c15dc..ae7ebe2da1 100644 --- a/dex2oat/dex2oat_image_test.cc +++ b/dex2oat/dex2oat_image_test.cc @@ -28,6 +28,7 @@ #include "base/macros.h" #include "base/unix_file/fd_file.h" #include "dex_file-inl.h" +#include "dex_file_loader.h" #include "jit/profile_compilation_info.h" #include "method_reference.h" #include "runtime.h" @@ -62,7 +63,11 @@ class Dex2oatImageTest : public CommonRuntimeTest { for (const std::string& dex : GetLibCoreDexFileNames()) { std::vector<std::unique_ptr<const DexFile>> dex_files; std::string error_msg; - CHECK(DexFile::Open(dex.c_str(), dex, /*verify_checksum*/ false, &error_msg, &dex_files)) + CHECK(DexFileLoader::Open(dex.c_str(), + dex, + /*verify_checksum*/ false, + &error_msg, + &dex_files)) << error_msg; for (const std::unique_ptr<const DexFile>& dex_file : dex_files) { for (size_t i = 0; i < dex_file->NumMethodIds(); ++i) { @@ -328,8 +333,8 @@ TEST_F(Dex2oatImageTest, TestModesAndFilters) { profile_file.Close(); std::cout << "Profile sizes " << profile_sizes << std::endl; // Since there is some difference between profile vs image + methods due to layout, check that - // the range is within expected margins (+-5%). - const double kRatio = 0.95; + // the range is within expected margins (+-10%). + const double kRatio = 0.90; EXPECT_LE(profile_sizes.art_size * kRatio, compiled_methods_sizes.art_size); // TODO(mathieuc): Find a reliable way to check compiled code. b/63746626 // EXPECT_LE(profile_sizes.oat_size * kRatio, compiled_methods_sizes.oat_size); diff --git a/dex2oat/dex2oat_options.cc b/dex2oat/dex2oat_options.cc new file mode 100644 index 0000000000..43e6c4d02f --- /dev/null +++ b/dex2oat/dex2oat_options.cc @@ -0,0 +1,268 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "dex2oat_options.h" + +#include <memory> + +#include "cmdline_parser.h" +#include "driver/compiler_options_map-inl.h" + +namespace art { + +template<> +struct CmdlineType<InstructionSet> : CmdlineTypeParser<InstructionSet> { + Result Parse(const std::string& option) { + InstructionSet set = GetInstructionSetFromString(option.c_str()); + if (set == kNone) { + return Result::Failure(std::string("Not a valid instruction set: '") + option + "'"); + } + return Result::Success(set); + } + + static const char* Name() { return "InstructionSet"; } +}; + +#define COMPILER_OPTIONS_MAP_TYPE Dex2oatArgumentMap +#define COMPILER_OPTIONS_MAP_KEY_TYPE Dex2oatArgumentMapKey +#include "driver/compiler_options_map-storage.h" + +// Specify storage for the Dex2oatOptions keys. + +#define DEX2OAT_OPTIONS_KEY(Type, Name, ...) \ + const Dex2oatArgumentMap::Key<Type> Dex2oatArgumentMap::Name {__VA_ARGS__}; // NOLINT [readability/braces] [4] +#include "dex2oat_options.def" + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wframe-larger-than=" + +using M = Dex2oatArgumentMap; +using Parser = CmdlineParser<Dex2oatArgumentMap, Dex2oatArgumentMap::Key>; +using Builder = Parser::Builder; + +static void AddInputMappings(Builder& builder) { + builder. 
+ Define("--dex-file=_") + .WithType<std::vector<std::string>>().AppendValues() + .IntoKey(M::DexFiles) + .Define("--dex-location=_") + .WithType<std::vector<std::string>>().AppendValues() + .IntoKey(M::DexLocations) + .Define("--zip-fd=_") + .WithType<int>() + .IntoKey(M::ZipFd) + .Define("--zip-location=_") + .WithType<std::string>() + .IntoKey(M::ZipLocation) + .Define("--boot-image=_") + .WithType<std::string>() + .IntoKey(M::BootImage); +} + +static void AddGeneratedArtifactMappings(Builder& builder) { + builder. + Define("--input-vdex-fd=_") + .WithType<int>() + .IntoKey(M::InputVdexFd) + .Define("--input-vdex=_") + .WithType<std::string>() + .IntoKey(M::InputVdex) + .Define("--output-vdex-fd=_") + .WithType<int>() + .IntoKey(M::OutputVdexFd) + .Define("--output-vdex=_") + .WithType<std::string>() + .IntoKey(M::OutputVdex) + .Define("--oat-file=_") + .WithType<std::vector<std::string>>().AppendValues() + .IntoKey(M::OatFiles) + .Define("--oat-symbols=_") + .WithType<std::vector<std::string>>().AppendValues() + .IntoKey(M::OatSymbols) + .Define("--oat-fd=_") + .WithType<int>() + .IntoKey(M::OatFd) + .Define("--oat-location=_") + .WithType<std::string>() + .IntoKey(M::OatLocation); +} + +static void AddImageMappings(Builder& builder) { + builder. 
+ Define("--image=_") + .WithType<std::vector<std::string>>().AppendValues() + .IntoKey(M::ImageFilenames) + .Define("--image-classes=_") + .WithType<std::string>() + .IntoKey(M::ImageClasses) + .Define("--image-classes-zip=_") + .WithType<std::string>() + .IntoKey(M::ImageClassesZip) + .Define("--base=_") + .WithType<std::string>() + .IntoKey(M::Base) + .Define("--app-image-file=_") + .WithType<std::string>() + .IntoKey(M::AppImageFile) + .Define("--app-image-fd=_") + .WithType<int>() + .IntoKey(M::AppImageFileFd) + .Define("--multi-image") + .IntoKey(M::MultiImage) + .Define("--dirty-image-objects=_") + .WithType<std::string>() + .IntoKey(M::DirtyImageObjects) + .Define("--image-format=_") + .WithType<ImageHeader::StorageMode>() + .WithValueMap({{"lz4", ImageHeader::kStorageModeLZ4}, + {"lz4hc", ImageHeader::kStorageModeLZ4HC}, + {"uncompressed", ImageHeader::kStorageModeUncompressed}}) + .IntoKey(M::ImageFormat); +} + +static void AddSwapMappings(Builder& builder) { + builder. + Define("--swap-file=_") + .WithType<std::string>() + .IntoKey(M::SwapFile) + .Define("--swap-fd=_") + .WithType<int>() + .IntoKey(M::SwapFileFd) + .Define("--swap-dex-size-threshold=_") + .WithType<unsigned int>() + .IntoKey(M::SwapDexSizeThreshold) + .Define("--swap-dex-count-threshold=_") + .WithType<unsigned int>() + .IntoKey(M::SwapDexCountThreshold); +} + +static void AddCompilerMappings(Builder& builder) { + builder. 
+ Define("--compiled-classes=_") + .WithType<std::string>() + .IntoKey(M::CompiledClasses) + .Define("--compiled-classes-zip=_") + .WithType<std::string>() + .IntoKey(M::CompiledClassesZip) + .Define("--compiled-methods=_") + .WithType<std::string>() + .IntoKey(M::CompiledMethods) + .Define("--compiled-methods-zip=_") + .WithType<std::string>() + .IntoKey(M::CompiledMethodsZip) + .Define("--run-passes=_") + .WithType<std::string>() + .IntoKey(M::Passes) + .Define("--profile-file=_") + .WithType<std::string>() + .IntoKey(M::Profile) + .Define("--profile-file-fd=_") + .WithType<int>() + .IntoKey(M::ProfileFd) + .Define("--no-inline-from=_") + .WithType<std::string>() + .IntoKey(M::NoInlineFrom); +} + +static void AddTargetMappings(Builder& builder) { + builder. + Define("--instruction-set=_") + .WithType<InstructionSet>() + .IntoKey(M::TargetInstructionSet) + .Define("--instruction-set-variant=_") + .WithType<std::string>() + .IntoKey(M::TargetInstructionSetVariant) + .Define("--instruction-set-features=_") + .WithType<std::string>() + .IntoKey(M::TargetInstructionSetFeatures); +} + +static Parser CreateArgumentParser() { + std::unique_ptr<Builder> parser_builder = std::unique_ptr<Builder>(new Builder()); + + AddInputMappings(*parser_builder); + AddGeneratedArtifactMappings(*parser_builder); + AddImageMappings(*parser_builder); + AddSwapMappings(*parser_builder); + AddCompilerMappings(*parser_builder); + AddTargetMappings(*parser_builder); + + parser_builder-> + Define({"--watch-dog", "--no-watch-dog"}) + .WithValues({true, false}) + .IntoKey(M::Watchdog) + .Define("--watchdog-timeout=_") + .WithType<int>() + .IntoKey(M::WatchdogTimeout) + .Define("-j_") + .WithType<unsigned int>() + .IntoKey(M::Threads) + .Define("--android-root=_") + .WithType<std::string>() + .IntoKey(M::AndroidRoot) + .Define("--compiler-backend=_") + .WithType<Compiler::Kind>() + .WithValueMap({{"Quick", Compiler::Kind::kQuick}, + {"Optimizing", Compiler::Kind::kOptimizing}}) + 
.IntoKey(M::Backend) + .Define("--host") + .IntoKey(M::Host) + .Define("--dump-timing") + .IntoKey(M::DumpTiming) + .Define("--dump-passes") + .IntoKey(M::DumpPasses) + .Define("--dump-stats") + .IntoKey(M::DumpStats) + .Define("--avoid-storing-invocation") + .IntoKey(M::AvoidStoringInvocation) + .Define("--very-large-app-threshold=_") + .WithType<unsigned int>() + .IntoKey(M::VeryLargeAppThreshold) + .Define("--force-determinism") + .IntoKey(M::ForceDeterminism) + .Define("--classpath-dir=_") + .WithType<std::string>() + .IntoKey(M::ClasspathDir) + .Define("--class-loader-context=_") + .WithType<std::string>() + .IntoKey(M::ClassLoaderContext) + .Define("--runtime-arg _") + .WithType<std::vector<std::string>>().AppendValues() + .IntoKey(M::RuntimeOptions); + + AddCompilerOptionsArgumentParserOptions<Dex2oatArgumentMap>(*parser_builder); + + parser_builder->IgnoreUnrecognized(false); + + return parser_builder->Build(); +} + +#pragma GCC diagnostic pop + +std::unique_ptr<Dex2oatArgumentMap> Dex2oatArgumentMap::Parse(int argc, + const char** argv, + std::string* error_msg) { + Parser parser = CreateArgumentParser(); + CmdlineResult parse_result = parser.Parse(argv, argc); + if (!parse_result.IsSuccess()) { + *error_msg = parse_result.GetMessage(); + return nullptr; + } + + return std::unique_ptr<Dex2oatArgumentMap>(new Dex2oatArgumentMap(parser.ReleaseArgumentsMap())); +} + +} // namespace art diff --git a/dex2oat/dex2oat_options.def b/dex2oat/dex2oat_options.def new file mode 100644 index 0000000000..83a3035ed5 --- /dev/null +++ b/dex2oat/dex2oat_options.def @@ -0,0 +1,90 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef DEX2OAT_OPTIONS_KEY +#error "Please #define DEX2OAT_OPTIONS_KEY before #including this file" +#define DEX2OAT_OPTIONS_KEY(...) // Don't display errors in this file in IDEs. +#endif + +// This file defines the list of keys for Dex2oatOptions. +// These can be used with Dex2oatOptions.Get/Set/etc, for example: +// Dex2oatOptions opt; bool* dex2oat_enabled = opt.Get(Dex2oatOptions::Dex2Oat); +// +// Column Descriptions: +// <<Type>> <<Key Name>> <<Default Value>> +// +// Default values are only used by Map::GetOrDefault(K<T>). +// If a default value is omitted here, T{} is used as the default value, which is +// almost-always the value of the type as if it was memset to all 0. +// +// Please keep the columns aligned if possible when adding new rows. +// + +// Parse-able keys from the command line. 
+DEX2OAT_OPTIONS_KEY (std::vector<std::string>, DexFiles) +DEX2OAT_OPTIONS_KEY (std::vector<std::string>, DexLocations) +DEX2OAT_OPTIONS_KEY (int, ZipFd) +DEX2OAT_OPTIONS_KEY (std::string, ZipLocation) +DEX2OAT_OPTIONS_KEY (int, InputVdexFd) +DEX2OAT_OPTIONS_KEY (std::string, InputVdex) +DEX2OAT_OPTIONS_KEY (int, OutputVdexFd) +DEX2OAT_OPTIONS_KEY (std::string, OutputVdex) +DEX2OAT_OPTIONS_KEY (std::vector<std::string>, OatFiles) +DEX2OAT_OPTIONS_KEY (std::vector<std::string>, OatSymbols) +DEX2OAT_OPTIONS_KEY (int, OatFd) +DEX2OAT_OPTIONS_KEY (std::string, OatLocation) +DEX2OAT_OPTIONS_KEY (bool, Watchdog) +DEX2OAT_OPTIONS_KEY (int, WatchdogTimeout) +DEX2OAT_OPTIONS_KEY (unsigned int, Threads) +DEX2OAT_OPTIONS_KEY (std::vector<std::string>, ImageFilenames) +DEX2OAT_OPTIONS_KEY (std::string, ImageClasses) +DEX2OAT_OPTIONS_KEY (std::string, ImageClassesZip) +DEX2OAT_OPTIONS_KEY (ImageHeader::StorageMode, ImageFormat) +DEX2OAT_OPTIONS_KEY (std::string, CompiledClasses) +DEX2OAT_OPTIONS_KEY (std::string, CompiledClassesZip) +DEX2OAT_OPTIONS_KEY (std::string, CompiledMethods) +DEX2OAT_OPTIONS_KEY (std::string, CompiledMethodsZip) +DEX2OAT_OPTIONS_KEY (std::string, Passes) +DEX2OAT_OPTIONS_KEY (std::string, Base) // TODO: Hex string parsing. 
+DEX2OAT_OPTIONS_KEY (std::string, BootImage) +DEX2OAT_OPTIONS_KEY (std::string, AndroidRoot) +DEX2OAT_OPTIONS_KEY (InstructionSet, TargetInstructionSet) +DEX2OAT_OPTIONS_KEY (std::string, TargetInstructionSetVariant) +DEX2OAT_OPTIONS_KEY (std::string, TargetInstructionSetFeatures) +DEX2OAT_OPTIONS_KEY (Compiler::Kind, Backend) +DEX2OAT_OPTIONS_KEY (std::string, Profile) +DEX2OAT_OPTIONS_KEY (int, ProfileFd) +DEX2OAT_OPTIONS_KEY (Unit, Host) +DEX2OAT_OPTIONS_KEY (Unit, DumpTiming) +DEX2OAT_OPTIONS_KEY (Unit, DumpPasses) +DEX2OAT_OPTIONS_KEY (Unit, DumpStats) +DEX2OAT_OPTIONS_KEY (Unit, AvoidStoringInvocation) +DEX2OAT_OPTIONS_KEY (std::string, SwapFile) +DEX2OAT_OPTIONS_KEY (int, SwapFileFd) +DEX2OAT_OPTIONS_KEY (unsigned int, SwapDexSizeThreshold) +DEX2OAT_OPTIONS_KEY (unsigned int, SwapDexCountThreshold) +DEX2OAT_OPTIONS_KEY (unsigned int, VeryLargeAppThreshold) +DEX2OAT_OPTIONS_KEY (std::string, AppImageFile) +DEX2OAT_OPTIONS_KEY (int, AppImageFileFd) +DEX2OAT_OPTIONS_KEY (Unit, MultiImage) +DEX2OAT_OPTIONS_KEY (std::string, NoInlineFrom) +DEX2OAT_OPTIONS_KEY (Unit, ForceDeterminism) +DEX2OAT_OPTIONS_KEY (std::string, ClasspathDir) +DEX2OAT_OPTIONS_KEY (std::string, ClassLoaderContext) +DEX2OAT_OPTIONS_KEY (std::string, DirtyImageObjects) +DEX2OAT_OPTIONS_KEY (std::vector<std::string>, RuntimeOptions) + +#undef DEX2OAT_OPTIONS_KEY diff --git a/dex2oat/dex2oat_options.h b/dex2oat/dex2oat_options.h new file mode 100644 index 0000000000..a4c718625f --- /dev/null +++ b/dex2oat/dex2oat_options.h @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_DEX2OAT_DEX2OAT_OPTIONS_H_ +#define ART_DEX2OAT_DEX2OAT_OPTIONS_H_ + +#include <cstdio> +#include <string> +#include <vector> + +#include "base/variant_map.h" +#include "cmdline_types.h" // TODO: don't need to include this file here +#include "compiler.h" +#include "driver/compiler_options_map.h" +#include "image.h" + +namespace art { + +template <typename TVariantMap, + template <typename TKeyValue> class TVariantMapKey> +struct CmdlineParser; + +// Define a key that is usable with a Dex2oatArgumentMap. +// This key will *not* work with other subtypes of VariantMap. +template <typename TValue> +struct Dex2oatArgumentMapKey : VariantMapKey<TValue> { + Dex2oatArgumentMapKey() {} + explicit Dex2oatArgumentMapKey(TValue default_value) + : VariantMapKey<TValue>(std::move(default_value)) {} + // Don't ODR-use constexpr default values, which means that Struct::Fields + // that are declared 'static constexpr T Name = Value' don't need to have a matching definition. +}; + +// Defines a type-safe heterogeneous key->value map. +// Use the VariantMap interface to look up or to store a Dex2oatArgumentMapKey,Value pair. +// +// Example: +// auto map = Dex2oatArgumentMap(); +// map.Set(Dex2oatArgumentMap::ZipFd, -1); +// int *target_utilization = map.Get(Dex2oatArgumentMap::ZipFd); +// +struct Dex2oatArgumentMap : CompilerOptionsMap<Dex2oatArgumentMap, Dex2oatArgumentMapKey> { + // This 'using' line is necessary to inherit the variadic constructor. 
+ using CompilerOptionsMap<Dex2oatArgumentMap, Dex2oatArgumentMapKey>::CompilerOptionsMap; + + static std::unique_ptr<Dex2oatArgumentMap> Parse(int argc, + const char** argv, + std::string* error_msg); + + // Make the next many usages of Key slightly shorter to type. + template <typename TValue> + using Key = Dex2oatArgumentMapKey<TValue>; + + // List of key declarations, shorthand for 'static const Key<T> Name' +#define DEX2OAT_OPTIONS_KEY(Type, Name, ...) static const Key<Type> (Name); +#include "dex2oat_options.def" +}; + +extern template struct CompilerOptionsMap<Dex2oatArgumentMap, Dex2oatArgumentMapKey>; + +} // namespace art + +#endif // ART_DEX2OAT_DEX2OAT_OPTIONS_H_ diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc index 5bf35139cb..1b731fc7f6 100644 --- a/dex2oat/dex2oat_test.cc +++ b/dex2oat/dex2oat_test.cc @@ -33,6 +33,7 @@ #include "dex2oat_environment_test.h" #include "dex2oat_return_codes.h" #include "dex_file-inl.h" +#include "dex_file_loader.h" #include "jit/profile_compilation_info.h" #include "oat.h" #include "oat_file.h" @@ -677,7 +678,7 @@ class Dex2oatLayoutTest : public Dex2oatTest { const char* location = dex_location.c_str(); std::string error_msg; std::vector<std::unique_ptr<const DexFile>> dex_files; - ASSERT_TRUE(DexFile::Open(location, location, true, &error_msg, &dex_files)); + ASSERT_TRUE(DexFileLoader::Open(location, location, true, &error_msg, &dex_files)); EXPECT_EQ(dex_files.size(), 1U); std::unique_ptr<const DexFile>& dex_file = dex_files[0]; GenerateProfile(profile_location, @@ -811,7 +812,7 @@ class Dex2oatLayoutTest : public Dex2oatTest { const char* location = dex_location.c_str(); std::vector<std::unique_ptr<const DexFile>> dex_files; - ASSERT_TRUE(DexFile::Open(location, location, true, &error_msg, &dex_files)); + ASSERT_TRUE(DexFileLoader::Open(location, location, true, &error_msg, &dex_files)); EXPECT_EQ(dex_files.size(), 1U); std::unique_ptr<const DexFile>& old_dex_file = dex_files[0]; diff --git 
a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc index dfbe31a548..05af442a3a 100644 --- a/dex2oat/linker/oat_writer.cc +++ b/dex2oat/linker/oat_writer.cc @@ -34,6 +34,7 @@ #include "debug/method_debug_info.h" #include "dex/verification_results.h" #include "dex_file-inl.h" +#include "dex_file_loader.h" #include "dex_file_types.h" #include "dexlayout.h" #include "driver/compiler_driver-inl.h" @@ -52,6 +53,7 @@ #include "mirror/class_loader.h" #include "mirror/dex_cache-inl.h" #include "mirror/object-inl.h" +#include "native_dex_file.h" #include "oat_quick_method_header.h" #include "os.h" #include "safe_map.h" @@ -415,7 +417,7 @@ bool OatWriter::AddDexFileSource(const char* filename, if (fd.Fd() == -1) { PLOG(ERROR) << "Failed to read magic number from dex file: '" << filename << "'"; return false; - } else if (IsDexMagic(magic)) { + } else if (DexFileLoader::IsValidMagic(magic)) { // The file is open for reading, not writing, so it's OK to let the File destructor // close it without checking for explicit Close(), so pass checkUsage = false. 
raw_dex_files_.emplace_back(new File(fd.Release(), location, /* checkUsage */ false)); @@ -447,13 +449,13 @@ bool OatWriter::AddZippedDexFilesSource(File&& zip_fd, return false; } for (size_t i = 0; ; ++i) { - std::string entry_name = DexFile::GetMultiDexClassesDexName(i); + std::string entry_name = DexFileLoader::GetMultiDexClassesDexName(i); std::unique_ptr<ZipEntry> entry(zip_archive->Find(entry_name.c_str(), &error_msg)); if (entry == nullptr) { break; } zipped_dex_files_.push_back(std::move(entry)); - zipped_dex_file_locations_.push_back(DexFile::GetMultiDexLocation(i, location)); + zipped_dex_file_locations_.push_back(DexFileLoader::GetMultiDexLocation(i, location)); const char* full_location = zipped_dex_file_locations_.back().c_str(); oat_dex_files_.emplace_back(full_location, DexFileSource(zipped_dex_files_.back().get()), @@ -478,12 +480,13 @@ bool OatWriter::AddVdexDexFilesSource(const VdexFile& vdex_file, LOG(ERROR) << "Unexpected number of dex files in vdex " << location; return false; } - if (!DexFile::IsMagicValid(current_dex_data)) { + + if (!DexFileLoader::IsValidMagic(current_dex_data)) { LOG(ERROR) << "Invalid magic in vdex file created from " << location; return false; } // We used `zipped_dex_file_locations_` to keep the strings in memory. - zipped_dex_file_locations_.push_back(DexFile::GetMultiDexLocation(i, location)); + zipped_dex_file_locations_.push_back(DexFileLoader::GetMultiDexLocation(i, location)); const char* full_location = zipped_dex_file_locations_.back().c_str(); oat_dex_files_.emplace_back(full_location, DexFileSource(current_dex_data), @@ -3107,11 +3110,12 @@ bool OatWriter::ReadDexFileHeader(File* file, OatDexFile* oat_dex_file) { } bool OatWriter::ValidateDexFileHeader(const uint8_t* raw_header, const char* location) { - if (!DexFile::IsMagicValid(raw_header)) { + const bool valid_native_dex_magic = NativeDexFile::IsMagicValid(raw_header); + if (!valid_native_dex_magic) { LOG(ERROR) << "Invalid magic number in dex file header. 
" << " File: " << location; return false; } - if (!DexFile::IsVersionValid(raw_header)) { + if (!NativeDexFile::IsVersionValid(raw_header)) { LOG(ERROR) << "Invalid version number in dex file header. " << " File: " << location; return false; } @@ -3242,12 +3246,12 @@ bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_fil LOG(ERROR) << "Failed to extract dex file to mem map for layout: " << error_msg; return false; } - dex_file = DexFile::Open(location, - zip_entry->GetCrc32(), - std::move(mem_map), - /* verify */ true, - /* verify_checksum */ true, - &error_msg); + dex_file = DexFileLoader::Open(location, + zip_entry->GetCrc32(), + std::move(mem_map), + /* verify */ true, + /* verify_checksum */ true, + &error_msg); } else if (oat_dex_file->source_.IsRawFile()) { File* raw_file = oat_dex_file->source_.GetRawFile(); int dup_fd = dup(raw_file->Fd()); @@ -3255,7 +3259,7 @@ bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_fil PLOG(ERROR) << "Failed to dup dex file descriptor (" << raw_file->Fd() << ") at " << location; return false; } - dex_file = DexFile::OpenDex(dup_fd, location, /* verify_checksum */ true, &error_msg); + dex_file = DexFileLoader::OpenDex(dup_fd, location, /* verify_checksum */ true, &error_msg); } else { // The source data is a vdex file. CHECK(oat_dex_file->source_.IsRawData()) @@ -3267,14 +3271,14 @@ bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_fil DCHECK(ValidateDexFileHeader(raw_dex_file, oat_dex_file->GetLocation())); const UnalignedDexFileHeader* header = AsUnalignedDexFileHeader(raw_dex_file); // Since the source may have had its layout changed, or may be quickened, don't verify it. 
- dex_file = DexFile::Open(raw_dex_file, - header->file_size_, - location, - oat_dex_file->dex_file_location_checksum_, - nullptr, - /* verify */ false, - /* verify_checksum */ false, - &error_msg); + dex_file = DexFileLoader::Open(raw_dex_file, + header->file_size_, + location, + oat_dex_file->dex_file_location_checksum_, + nullptr, + /* verify */ false, + /* verify_checksum */ false, + &error_msg); } if (dex_file == nullptr) { LOG(ERROR) << "Failed to open dex file for layout: " << error_msg; @@ -3532,14 +3536,14 @@ bool OatWriter::OpenDexFiles( } // Now, open the dex file. - dex_files.emplace_back(DexFile::Open(raw_dex_file, - oat_dex_file.dex_file_size_, - oat_dex_file.GetLocation(), - oat_dex_file.dex_file_location_checksum_, - /* oat_dex_file */ nullptr, - verify, - verify, - &error_msg)); + dex_files.emplace_back(DexFileLoader::Open(raw_dex_file, + oat_dex_file.dex_file_size_, + oat_dex_file.GetLocation(), + oat_dex_file.dex_file_location_checksum_, + /* oat_dex_file */ nullptr, + verify, + verify, + &error_msg)); if (dex_files.back() == nullptr) { LOG(ERROR) << "Failed to open dex file from oat file. File: " << oat_dex_file.GetLocation() << " Error: " << error_msg; diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc index d89d9f07b2..a19057a0ed 100644 --- a/dex2oat/linker/oat_writer_test.cc +++ b/dex2oat/linker/oat_writer_test.cc @@ -26,6 +26,7 @@ #include "compiled_method-inl.h" #include "compiler.h" #include "debug/method_debug_info.h" +#include "dex_file_loader.h" #include "dex/quick_compiler_callbacks.h" #include "dex/verification_results.h" #include "driver/compiler_driver.h" @@ -48,16 +49,6 @@ namespace art { namespace linker { -NO_RETURN static void Usage(const char* fmt, ...) 
{ - va_list ap; - va_start(ap, fmt); - std::string error; - android::base::StringAppendV(&error, fmt, ap); - LOG(FATAL) << error; - va_end(ap); - UNREACHABLE(); -} - class OatTest : public CommonCompilerTest { protected: static const bool kCompile = false; // DISABLED_ due to the time to compile libcore @@ -101,8 +92,11 @@ class OatTest : public CommonCompilerTest { insn_features_ = InstructionSetFeatures::FromVariant(insn_set, "default", error_msg); ASSERT_TRUE(insn_features_ != nullptr) << *error_msg; compiler_options_.reset(new CompilerOptions); - for (const std::string& option : compiler_options) { - compiler_options_->ParseCompilerOption(option, Usage); + if (!compiler_options_->ParseCompilerOptions(compiler_options, + false /* ignore_unrecognized */, + error_msg)) { + LOG(FATAL) << *error_msg; + UNREACHABLE(); } verification_results_.reset(new VerificationResults(compiler_options_.get())); callbacks_.reset(new QuickCompilerCallbacks(CompilerCallbacks::CallbackMode::kCompileApp)); @@ -752,14 +746,14 @@ void OatTest::TestZipFileInput(bool verify) { ASSERT_EQ(0, memcmp(&dex_file1_data->GetHeader(), &opened_dex_file1->GetHeader(), dex_file1_data->GetHeader().file_size_)); - ASSERT_EQ(DexFile::GetMultiDexLocation(0, zip_file.GetFilename().c_str()), + ASSERT_EQ(DexFileLoader::GetMultiDexLocation(0, zip_file.GetFilename().c_str()), opened_dex_file1->GetLocation()); ASSERT_EQ(dex_file2_data->GetHeader().file_size_, opened_dex_file2->GetHeader().file_size_); ASSERT_EQ(0, memcmp(&dex_file2_data->GetHeader(), &opened_dex_file2->GetHeader(), dex_file2_data->GetHeader().file_size_)); - ASSERT_EQ(DexFile::GetMultiDexLocation(1, zip_file.GetFilename().c_str()), + ASSERT_EQ(DexFileLoader::GetMultiDexLocation(1, zip_file.GetFilename().c_str()), opened_dex_file2->GetLocation()); } } @@ -801,14 +795,14 @@ void OatTest::TestZipFileInput(bool verify) { ASSERT_EQ(0, memcmp(&dex_file1_data->GetHeader(), &opened_dex_file1->GetHeader(), dex_file1_data->GetHeader().file_size_)); - 
ASSERT_EQ(DexFile::GetMultiDexLocation(0, zip_file.GetFilename().c_str()), + ASSERT_EQ(DexFileLoader::GetMultiDexLocation(0, zip_file.GetFilename().c_str()), opened_dex_file1->GetLocation()); ASSERT_EQ(dex_file2_data->GetHeader().file_size_, opened_dex_file2->GetHeader().file_size_); ASSERT_EQ(0, memcmp(&dex_file2_data->GetHeader(), &opened_dex_file2->GetHeader(), dex_file2_data->GetHeader().file_size_)); - ASSERT_EQ(DexFile::GetMultiDexLocation(1, zip_file.GetFilename().c_str()), + ASSERT_EQ(DexFileLoader::GetMultiDexLocation(1, zip_file.GetFilename().c_str()), opened_dex_file2->GetLocation()); } } diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc index 7599d230d2..3648a3edd0 100644 --- a/dexdump/dexdump.cc +++ b/dexdump/dexdump.cc @@ -45,6 +45,7 @@ #include "android-base/stringprintf.h" #include "dex_file-inl.h" +#include "dex_file_loader.h" #include "dex_file_types.h" #include "dex_instruction-inl.h" #include "dexdump_cfg.h" @@ -1825,7 +1826,7 @@ static void processDexFile(const char* fileName, fputs("Opened '", gOutFile); fputs(fileName, gOutFile); if (n > 1) { - fprintf(gOutFile, ":%s", DexFile::GetMultiDexClassesDexName(i).c_str()); + fprintf(gOutFile, ":%s", DexFileLoader::GetMultiDexClassesDexName(i).c_str()); } fprintf(gOutFile, "', DEX version '%.3s'\n", pDexFile->GetHeader().magic_ + 4); } @@ -1882,7 +1883,7 @@ int processFile(const char* fileName) { const bool kVerifyChecksum = !gOptions.ignoreBadChecksum; std::string error_msg; std::vector<std::unique_ptr<const DexFile>> dex_files; - if (!DexFile::Open(fileName, fileName, kVerifyChecksum, &error_msg, &dex_files)) { + if (!DexFileLoader::Open(fileName, fileName, kVerifyChecksum, &error_msg, &dex_files)) { // Display returned error message to user. Note that this error behavior // differs from the error messages shown by the original Dalvik dexdump. 
fputs(error_msg.c_str(), stderr); diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc index 095c960bc0..ade00723fd 100644 --- a/dexlayout/dexlayout.cc +++ b/dexlayout/dexlayout.cc @@ -35,6 +35,7 @@ #include "dex_file-inl.h" #include "dex_file_layout.h" +#include "dex_file_loader.h" #include "dex_file_types.h" #include "dex_file_verifier.h" #include "dex_instruction-inl.h" @@ -1929,14 +1930,14 @@ void DexLayout::OutputDexFile(const DexFile* dex_file) { // Verify the output dex file's structure for debug builds. if (kIsDebugBuild) { std::string location = "memory mapped file for " + dex_file_location; - std::unique_ptr<const DexFile> output_dex_file(DexFile::Open(mem_map_->Begin(), - mem_map_->Size(), - location, - header_->Checksum(), - /*oat_dex_file*/ nullptr, - /*verify*/ true, - /*verify_checksum*/ false, - &error_msg)); + std::unique_ptr<const DexFile> output_dex_file(DexFileLoader::Open(mem_map_->Begin(), + mem_map_->Size(), + location, + header_->Checksum(), + /*oat_dex_file*/ nullptr, + /*verify*/ true, + /*verify_checksum*/ false, + &error_msg)); DCHECK(output_dex_file != nullptr) << "Failed to re-open output file:" << error_msg; } // Do IR-level comparison between input and output. This check ignores potential differences @@ -1998,7 +1999,7 @@ int DexLayout::ProcessFile(const char* file_name) { const bool verify_checksum = !options_.ignore_bad_checksum_; std::string error_msg; std::vector<std::unique_ptr<const DexFile>> dex_files; - if (!DexFile::Open(file_name, file_name, verify_checksum, &error_msg, &dex_files)) { + if (!DexFileLoader::Open(file_name, file_name, verify_checksum, &error_msg, &dex_files)) { // Display returned error message to user. Note that this error behavior // differs from the error messages shown by the original Dalvik dexdump. 
fputs(error_msg.c_str(), stderr); diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc index 336eb5fbcc..f8fa893069 100644 --- a/dexlayout/dexlayout_test.cc +++ b/dexlayout/dexlayout_test.cc @@ -24,6 +24,7 @@ #include "base/unix_file/fd_file.h" #include "common_runtime_test.h" #include "dex_file-inl.h" +#include "dex_file_loader.h" #include "exec_utils.h" #include "jit/profile_compilation_info.h" #include "utils.h" @@ -322,11 +323,11 @@ class DexLayoutTest : public CommonRuntimeTest { const std::string& dex_location) { std::vector<std::unique_ptr<const DexFile>> dex_files; std::string error_msg; - bool result = DexFile::Open(input_dex.c_str(), - input_dex, - false, - &error_msg, - &dex_files); + bool result = DexFileLoader::Open(input_dex.c_str(), + input_dex, + false, + &error_msg, + &dex_files); ASSERT_TRUE(result) << error_msg; ASSERT_GE(dex_files.size(), 1u); diff --git a/dexlist/dexlist.cc b/dexlist/dexlist.cc index 6a1e22a525..e5870522a3 100644 --- a/dexlist/dexlist.cc +++ b/dexlist/dexlist.cc @@ -27,6 +27,7 @@ #include <stdlib.h> #include "dex_file-inl.h" +#include "dex_file_loader.h" #include "mem_map.h" #include "runtime.h" @@ -178,7 +179,7 @@ static int processFile(const char* fileName) { static constexpr bool kVerifyChecksum = true; std::string error_msg; std::vector<std::unique_ptr<const DexFile>> dex_files; - if (!DexFile::Open(fileName, fileName, kVerifyChecksum, &error_msg, &dex_files)) { + if (!DexFileLoader::Open(fileName, fileName, kVerifyChecksum, &error_msg, &dex_files)) { fputs(error_msg.c_str(), stderr); fputc('\n', stderr); return -1; diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc index 32dd69e0e6..0282fbce1f 100644 --- a/openjdkjvmti/events.cc +++ b/openjdkjvmti/events.cc @@ -842,6 +842,12 @@ void EventHandler::HandleLocalAccessCapabilityAdded() { bool operator()(art::ObjPtr<art::mirror::Class> klass) OVERRIDE REQUIRES(art::Locks::mutator_lock_) { + if (!klass->IsLoaded()) { + // Skip classes that aren't 
loaded since they might not have fully allocated and initialized + // their methods. Furthermore since the jvmti-plugin must have been loaded by this point + // these methods will definitely be using debuggable code. + return true; + } for (auto& m : klass->GetMethods(art::kRuntimePointerSize)) { const void* code = m.GetEntryPointFromQuickCompiledCode(); if (m.IsNative() || m.IsProxyMethod()) { diff --git a/openjdkjvmti/fixed_up_dex_file.cc b/openjdkjvmti/fixed_up_dex_file.cc index 5bfa5ca491..c4988695f1 100644 --- a/openjdkjvmti/fixed_up_dex_file.cc +++ b/openjdkjvmti/fixed_up_dex_file.cc @@ -30,6 +30,7 @@ */ #include "fixed_up_dex_file.h" +#include "dex_file_loader.h" #include "dex_file-inl.h" // Runtime includes. @@ -68,7 +69,7 @@ std::unique_ptr<FixedUpDexFile> FixedUpDexFile::Create(const art::DexFile& origi data.resize(original.Size()); memcpy(data.data(), original.Begin(), original.Size()); std::string error; - std::unique_ptr<const art::DexFile> new_dex_file(art::DexFile::Open( + std::unique_ptr<const art::DexFile> new_dex_file(art::DexFileLoader::Open( data.data(), data.size(), /*location*/"Unquickening_dexfile.dex", diff --git a/openjdkjvmti/ti_class.cc b/openjdkjvmti/ti_class.cc index daf4a8b7f2..5f29416134 100644 --- a/openjdkjvmti/ti_class.cc +++ b/openjdkjvmti/ti_class.cc @@ -43,6 +43,7 @@ #include "class_table-inl.h" #include "common_throws.h" #include "dex_file_annotations.h" +#include "dex_file_loader.h" #include "events-inl.h" #include "fixed_up_dex_file.h" #include "gc/heap-visit-objects-inl.h" @@ -106,12 +107,12 @@ static std::unique_ptr<const art::DexFile> MakeSingleDexFile(art::Thread* self, } uint32_t checksum = reinterpret_cast<const art::DexFile::Header*>(map->Begin())->checksum_; std::string map_name = map->GetName(); - std::unique_ptr<const art::DexFile> dex_file(art::DexFile::Open(map_name, - checksum, - std::move(map), - /*verify*/true, - /*verify_checksum*/true, - &error_msg)); + std::unique_ptr<const art::DexFile>
dex_file(art::DexFileLoader::Open(map_name, + checksum, + std::move(map), + /*verify*/true, + /*verify_checksum*/true, + &error_msg)); if (dex_file.get() == nullptr) { LOG(WARNING) << "Unable to load modified dex file for " << descriptor << ": " << error_msg; art::ThrowClassFormatError(nullptr, diff --git a/openjdkjvmti/ti_method.cc b/openjdkjvmti/ti_method.cc index f05977a4b1..50402a04a9 100644 --- a/openjdkjvmti/ti_method.cc +++ b/openjdkjvmti/ti_method.cc @@ -572,8 +572,9 @@ class CommonLocalVariableClosure : public art::Closure { return; } art::ArtMethod* method = visitor.GetMethod(); - if (method->IsNative()) { - // TODO We really should support get/set for non-shadow frames. + // Native and 'art' proxy methods don't have registers. + if (method->IsNative() || method->IsProxyMethod()) { + // TODO It might be useful to fake up support for get at least on proxy frames. result_ = ERR(OPAQUE_FRAME); return; } else if (method->GetCodeItem()->registers_size_ <= slot_) { diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc index 5d9bf2ce6e..53abfbca00 100644 --- a/openjdkjvmti/ti_redefine.cc +++ b/openjdkjvmti/ti_redefine.cc @@ -44,6 +44,7 @@ #include "class_linker-inl.h" #include "debugger.h" #include "dex_file.h" +#include "dex_file_loader.h" #include "dex_file_types.h" #include "events-inl.h" #include "gc/allocation_listener.h" @@ -425,12 +426,12 @@ jvmtiError Redefiner::AddRedefinition(ArtJvmTiEnv* env, const ArtClassDefinition return ERR(INVALID_CLASS_FORMAT); } uint32_t checksum = reinterpret_cast<const art::DexFile::Header*>(map->Begin())->checksum_; - std::unique_ptr<const art::DexFile> dex_file(art::DexFile::Open(map->GetName(), - checksum, - std::move(map), - /*verify*/true, - /*verify_checksum*/true, - error_msg_)); + std::unique_ptr<const art::DexFile> dex_file(art::DexFileLoader::Open(map->GetName(), + checksum, + std::move(map), + /*verify*/true, + /*verify_checksum*/true, + error_msg_)); if (dex_file.get() == nullptr) { os << "Unable 
to load modified dex file for " << def.GetName() << ": " << *error_msg_; *error_msg_ = os.str(); @@ -1096,15 +1097,19 @@ bool Redefiner::ClassRedefinition::CheckVerification(const RedefinitionDataIter& hs.NewHandle(GetClassLoader()), dex_file_->GetClassDef(0), /*class_def*/ nullptr, /*compiler_callbacks*/ - false, /*allow_soft_failures*/ + true, /*allow_soft_failures*/ /*log_level*/ art::verifier::HardFailLogMode::kLogWarning, &error); - bool passes = failure == art::verifier::FailureKind::kNoFailure; - if (!passes) { - RecordFailure(ERR(FAILS_VERIFICATION), "Failed to verify class. Error was: " + error); + switch (failure) { + case art::verifier::FailureKind::kNoFailure: + case art::verifier::FailureKind::kSoftFailure: + return true; + case art::verifier::FailureKind::kHardFailure: { + RecordFailure(ERR(FAILS_VERIFICATION), "Failed to verify class. Error was: " + error); + return false; + } } - return passes; } // Looks through the previously allocated cookies to see if we need to update them with another new @@ -1399,7 +1404,9 @@ void Redefiner::ClassRedefinition::UpdateMethods(art::ObjPtr<art::mirror::Class> method.SetNotIntrinsic(); // Notify the jit that this method is redefined. art::jit::Jit* jit = driver_->runtime_->GetJit(); - if (jit != nullptr) { + // Non-invokable methods don't have any JIT data associated with them so we don't need to tell + // the jit about them. 
+ if (jit != nullptr && method.IsInvokable()) { jit->GetCodeCache()->NotifyMethodRedefined(&method); } } diff --git a/openjdkjvmti/ti_search.cc b/openjdkjvmti/ti_search.cc index 25bc5d6eb3..bafc8552b1 100644 --- a/openjdkjvmti/ti_search.cc +++ b/openjdkjvmti/ti_search.cc @@ -39,6 +39,7 @@ #include "base/macros.h" #include "class_linker.h" #include "dex_file.h" +#include "dex_file_loader.h" #include "jni_internal.h" #include "mirror/class-inl.h" #include "mirror/object.h" @@ -226,7 +227,7 @@ jvmtiError SearchUtil::AddToBootstrapClassLoaderSearch(jvmtiEnv* env ATTRIBUTE_U std::string error_msg; std::vector<std::unique_ptr<const art::DexFile>> dex_files; - if (!art::DexFile::Open(segment, segment, true, &error_msg, &dex_files)) { + if (!art::DexFileLoader::Open(segment, segment, true, &error_msg, &dex_files)) { LOG(WARNING) << "Could not open " << segment << " for boot classpath extension: " << error_msg; return ERR(ILLEGAL_ARGUMENT); } diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc index d4cc42ae70..e0c139954d 100644 --- a/openjdkjvmti/ti_stack.cc +++ b/openjdkjvmti/ti_stack.cc @@ -789,7 +789,7 @@ jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED, } *method_ptr = art::jni::EncodeArtMethod(closure.method); - if (closure.method->IsNative()) { + if (closure.method->IsNative() || closure.method->IsProxyMethod()) { *location_ptr = -1; } else { if (closure.dex_pc == art::dex::kDexNoIndex) { diff --git a/profman/profman.cc b/profman/profman.cc index 9b4f5794b7..8ccf7b4c1d 100644 --- a/profman/profman.cc +++ b/profman/profman.cc @@ -39,6 +39,7 @@ #include "boot_image_profile.h" #include "bytecode_utils.h" #include "dex_file.h" +#include "dex_file_loader.h" #include "dex_file_types.h" #include "jit/profile_compilation_info.h" #include "profile_assistant.h" @@ -328,21 +329,21 @@ class ProfMan FINAL { std::string error_msg; std::vector<std::unique_ptr<const DexFile>> dex_files_for_location; if (use_apk_fd_list) { - if 
(DexFile::OpenZip(apks_fd_[i], - dex_locations_[i], - kVerifyChecksum, - &error_msg, - &dex_files_for_location)) { + if (DexFileLoader::OpenZip(apks_fd_[i], + dex_locations_[i], + kVerifyChecksum, + &error_msg, + &dex_files_for_location)) { } else { LOG(WARNING) << "OpenZip failed for '" << dex_locations_[i] << "' " << error_msg; continue; } } else { - if (DexFile::Open(apk_files_[i].c_str(), - dex_locations_[i], - kVerifyChecksum, - &error_msg, - &dex_files_for_location)) { + if (DexFileLoader::Open(apk_files_[i].c_str(), + dex_locations_[i], + kVerifyChecksum, + &error_msg, + &dex_files_for_location)) { } else { LOG(WARNING) << "Open failed for '" << dex_locations_[i] << "' " << error_msg; continue; @@ -795,7 +796,7 @@ class ProfMan FINAL { const DexFile* dex_file = class_ref.dex_file; const auto& dex_resolved_classes = resolved_class_set.emplace( dex_file->GetLocation(), - dex_file->GetBaseLocation(), + DexFileLoader::GetBaseLocation(dex_file->GetLocation()), dex_file->GetLocationChecksum(), dex_file->NumMethodIds()); dex_resolved_classes.first->AddClass(class_ref.TypeIndex()); diff --git a/runtime/Android.bp b/runtime/Android.bp index 711bc65892..ed9906a5ec 100644 --- a/runtime/Android.bp +++ b/runtime/Android.bp @@ -55,6 +55,7 @@ cc_defaults { "compiler_filter.cc", "debugger.cc", "dex_file.cc", + "dex_file_loader.cc", "dex_file_annotations.cc", "dex_file_layout.cc", "dex_file_tracking_registrar.cc", @@ -152,6 +153,7 @@ cc_defaults { "mirror/throwable.cc", "monitor.cc", "native_bridge_art_interface.cc", + "native_dex_file.cc", "native_stack_dump.cc", "native/dalvik_system_DexFile.cc", "native/dalvik_system_VMDebug.cc", @@ -530,6 +532,7 @@ art_cc_test { "barrier_test.cc", "base/arena_allocator_test.cc", "base/bit_field_test.cc", + "base/bit_struct_test.cc", "base/bit_utils_test.cc", "base/bit_vector_test.cc", "base/hash_set_test.cc", diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc index 8738adfada..c48e30f659 100644 --- 
a/runtime/base/arena_allocator.cc +++ b/runtime/base/arena_allocator.cc @@ -149,7 +149,10 @@ void ArenaAllocatorStatsImpl<kCount>::Dump(std::ostream& os, const Arena* first, os << "===== Allocation by kind\n"; static_assert(arraysize(kAllocNames) == kNumArenaAllocKinds, "arraysize of kAllocNames"); for (int i = 0; i < kNumArenaAllocKinds; i++) { + // Reduce output by listing only allocation kinds that actually have allocations. + if (alloc_stats_[i] != 0u) { os << kAllocNames[i] << std::setw(10) << alloc_stats_[i] << "\n"; + } } } diff --git a/runtime/base/arena_allocator_test.cc b/runtime/base/arena_allocator_test.cc index e2c2e2fc6d..6bf56c8c51 100644 --- a/runtime/base/arena_allocator_test.cc +++ b/runtime/base/arena_allocator_test.cc @@ -23,9 +23,9 @@ namespace art { class ArenaAllocatorTest : public testing::Test { protected: - size_t NumberOfArenas(ArenaAllocator* arena) { + size_t NumberOfArenas(ArenaAllocator* allocator) { size_t result = 0u; - for (Arena* a = arena->arena_head_; a != nullptr; a = a->next_) { + for (Arena* a = allocator->arena_head_; a != nullptr; a = a->next_) { ++result; } return result; diff --git a/runtime/base/arena_containers.h b/runtime/base/arena_containers.h index 62b974ee60..2e71156ee8 100644 --- a/runtime/base/arena_containers.h +++ b/runtime/base/arena_containers.h @@ -137,22 +137,22 @@ class ArenaAllocatorAdapter<void> : private ArenaAllocatorAdapterKind { typedef ArenaAllocatorAdapter<U> other; }; - explicit ArenaAllocatorAdapter(ArenaAllocator* arena_allocator, + explicit ArenaAllocatorAdapter(ArenaAllocator* allocator, ArenaAllocKind kind = kArenaAllocSTL) : ArenaAllocatorAdapterKind(kind), - arena_allocator_(arena_allocator) { + allocator_(allocator) { } template <typename U> ArenaAllocatorAdapter(const ArenaAllocatorAdapter<U>& other) // NOLINT, implicit : ArenaAllocatorAdapterKind(other), - arena_allocator_(other.arena_allocator_) { + allocator_(other.allocator_) { } ArenaAllocatorAdapter(const ArenaAllocatorAdapter&) = 
default; ArenaAllocatorAdapter& operator=(const ArenaAllocatorAdapter&) = default; ~ArenaAllocatorAdapter() = default; private: - ArenaAllocator* arena_allocator_; + ArenaAllocator* allocator_; template <typename U> friend class ArenaAllocatorAdapter; @@ -174,14 +174,14 @@ class ArenaAllocatorAdapter : private ArenaAllocatorAdapterKind { typedef ArenaAllocatorAdapter<U> other; }; - ArenaAllocatorAdapter(ArenaAllocator* arena_allocator, ArenaAllocKind kind) + ArenaAllocatorAdapter(ArenaAllocator* allocator, ArenaAllocKind kind) : ArenaAllocatorAdapterKind(kind), - arena_allocator_(arena_allocator) { + allocator_(allocator) { } template <typename U> ArenaAllocatorAdapter(const ArenaAllocatorAdapter<U>& other) // NOLINT, implicit : ArenaAllocatorAdapterKind(other), - arena_allocator_(other.arena_allocator_) { + allocator_(other.allocator_) { } ArenaAllocatorAdapter(const ArenaAllocatorAdapter&) = default; ArenaAllocatorAdapter& operator=(const ArenaAllocatorAdapter&) = default; @@ -197,10 +197,10 @@ class ArenaAllocatorAdapter : private ArenaAllocatorAdapterKind { pointer allocate(size_type n, ArenaAllocatorAdapter<void>::pointer hint ATTRIBUTE_UNUSED = nullptr) { DCHECK_LE(n, max_size()); - return arena_allocator_->AllocArray<T>(n, ArenaAllocatorAdapterKind::Kind()); + return allocator_->AllocArray<T>(n, ArenaAllocatorAdapterKind::Kind()); } void deallocate(pointer p, size_type n) { - arena_allocator_->MakeInaccessible(p, sizeof(T) * n); + allocator_->MakeInaccessible(p, sizeof(T) * n); } template <typename U, typename... 
Args> @@ -213,7 +213,7 @@ class ArenaAllocatorAdapter : private ArenaAllocatorAdapterKind { } private: - ArenaAllocator* arena_allocator_; + ArenaAllocator* allocator_; template <typename U> friend class ArenaAllocatorAdapter; @@ -226,7 +226,7 @@ class ArenaAllocatorAdapter : private ArenaAllocatorAdapterKind { template <typename T> inline bool operator==(const ArenaAllocatorAdapter<T>& lhs, const ArenaAllocatorAdapter<T>& rhs) { - return lhs.arena_allocator_ == rhs.arena_allocator_; + return lhs.allocator_ == rhs.allocator_; } template <typename T> diff --git a/runtime/base/arena_object.h b/runtime/base/arena_object.h index 2d8e7d8592..ed00babd62 100644 --- a/runtime/base/arena_object.h +++ b/runtime/base/arena_object.h @@ -32,8 +32,8 @@ class ArenaObject { return allocator->Alloc(size, kAllocKind); } - static void* operator new(size_t size, ScopedArenaAllocator* arena) { - return arena->Alloc(size, kAllocKind); + static void* operator new(size_t size, ScopedArenaAllocator* allocator) { + return allocator->Alloc(size, kAllocKind); } void operator delete(void*, size_t) { @@ -56,8 +56,8 @@ class DeletableArenaObject { return allocator->Alloc(size, kAllocKind); } - static void* operator new(size_t size, ScopedArenaAllocator* arena) { - return arena->Alloc(size, kAllocKind); + static void* operator new(size_t size, ScopedArenaAllocator* allocator) { + return allocator->Alloc(size, kAllocKind); } void operator delete(void*, size_t) { diff --git a/runtime/base/bit_struct.h b/runtime/base/bit_struct.h new file mode 100644 index 0000000000..1f86ee1917 --- /dev/null +++ b/runtime/base/bit_struct.h @@ -0,0 +1,290 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_BASE_BIT_STRUCT_H_ +#define ART_RUNTIME_BASE_BIT_STRUCT_H_ + +#include "bit_struct_detail.h" +#include "bit_utils.h" + +// +// Zero-cost, type-safe, well-defined "structs" of bit fields. +// +// --------------------------------------------- +// Usage example: +// --------------------------------------------- +// +// // Definition for type 'Example' +// BITSTRUCT_DEFINE_START(Example, 10) +// BitStructUint<0, 2> u2; // Every field must be a BitStruct[*]. +// BitStructInt<2, 7> i7; +// BitStructUint<9, 1> i1; +// BITSTRUCT_DEFINE_END(Example); +// +// Would define a bit struct with this layout: +// <- 1 -> <-- 7 --> <- 2 -> +// +--------+---------------+-----+ +// | i1 | i7 | u2 + +// +--------+---------------+-----+ +// 10 9 2 0 +// +// // Read-write just like regular values. +// Example ex; +// ex.u2 = 3; +// ex.i7 = -25; +// ex.i1 = true; +// size_t u2 = ex.u2; +// int i7 = ex.i7; +// bool i1 = ex.i1; +// +// // It's packed down to the smallest # of machine words. +// assert(sizeof(Example) == 2); +// // The exact bit pattern is well-defined by the template parameters. +// uint16_t cast = *reinterpret_cast<uint16_t*>(ex); +// assert(cast == ((3) | (0b1100111 << 2) | (true << 9))); +// +// --------------------------------------------- +// Why not just use C++ bitfields? +// --------------------------------------------- +// +// The layout is implementation-defined. 
+// We do not know whether the fields are packed left-to-right or +// right-to-left, so it makes it useless when the memory layout needs to be +// precisely controlled. +// +// --------------------------------------------- +// More info: +// --------------------------------------------- +// Currently uintmax_t is the largest supported underlying storage type, +// all (kBitOffset + kBitWidth) must fit into BitSizeOf<uintmax_t>(); +// +// Using BitStruct[U]int will automatically select an underlying type +// that's the smallest to fit your (offset + bitwidth). +// +// BitStructNumber can be used to manually select an underlying type. +// +// BitStructField can be used with custom standard-layout structs, +// thus allowing for arbitrary nesting of bit structs. +// +namespace art { +// Zero-cost wrapper around a struct 'T', allowing it to be stored as a bitfield +// at offset 'kBitOffset' and width 'kBitWidth'. +// The storage is plain unsigned int, whose size is the smallest required to fit +// 'kBitOffset + kBitWidth'. All operations to this become BitFieldExtract/BitFieldInsert +// operations to the underlying uint. +// +// Field memory representation: +// +// MSB <-- width --> LSB +// +--------+------------+--------+ +// | ?????? | u bitfield | ?????? + +// +--------+------------+--------+ +// offset 0 +// +// Reading/writing the bitfield (un)packs it into a temporary T: +// +// MSB <-- width --> LSB +// +-----------------+------------+ +// | 0.............0 | T bitfield | +// +-----------------+------------+ +// 0 +// +// It's the responsibility of the StorageType to ensure the bit representation +// of T can be represented by kBitWidth. 
+template <typename T, + size_t kBitOffset, + size_t kBitWidth = BitStructSizeOf<T>(), + typename StorageType = typename detail::MinimumTypeUnsignedHelper<kBitOffset + kBitWidth>::type> +struct BitStructField { + static_assert(std::is_standard_layout<T>::value, "T must be standard layout"); + + operator T() const { + return Get(); + } + + // Exclude overload when T==StorageType. + template <typename _ = void, + typename = std::enable_if_t<std::is_same<T, StorageType>::value, _>> + explicit operator StorageType() const { + return GetStorage(); + } + + BitStructField& operator=(T value) { + return Assign(*this, value); + } + + static constexpr size_t BitStructSizeOf() { + return kBitWidth; + } + + protected: + template <typename T2> + T2& Assign(T2& what, T value) { + // Since C++ doesn't allow the type of operator= to change out + // in the subclass, reimplement operator= in each subclass + // manually and call this helper function. + static_assert(std::is_base_of<BitStructField, T2>::value, "T2 must inherit BitStructField"); + what.Set(value); + return what; + } + + T Get() const { + ValueStorage vs; + vs.pod_.val_ = GetStorage(); + return vs.value_; + } + + void Set(T value) { + ValueStorage value_as_storage; + value_as_storage.value_ = value; + + storage_.pod_.val_ = BitFieldInsert(storage_.pod_.val_, + value_as_storage.pod_.val_, + kBitOffset, + kBitWidth); + } + + private: + StorageType GetStorage() const { + return BitFieldExtract(storage_.pod_.val_, kBitOffset, kBitWidth); + } + + // Underlying value must be wrapped in a separate standard-layout struct. + // See below for more details. + struct PodWrapper { + StorageType val_; + }; + + union ValueStorage { + // Safely alias pod_ and value_ together. + // + // See C++ 9.5.1 [class.union]: + // If a standard-layout union contains several standard-layout structs that share a common + // initial sequence ... it is permitted to inspect the common initial sequence of any of + // standard-layout struct members. 
+ PodWrapper pod_; + T value_; + } storage_; + + // Future work: In theory almost non-standard layout can be supported here, + // assuming they don't rely on the address of (this). + // We just have to use memcpy since the union-aliasing would not work. +}; + +// Base class for number-like BitStruct fields. +// T is the type to store in as a bit field. +// kBitOffset, kBitWidth define the position and length of the bitfield. +// +// (Common usage should be BitStructInt, BitStructUint -- this +// intermediate template allows a user-defined integer to be used.) +template <typename T, size_t kBitOffset, size_t kBitWidth> +struct BitStructNumber : public BitStructField<T, kBitOffset, kBitWidth, /*StorageType*/T> { + using StorageType = T; + + BitStructNumber& operator=(T value) { + return BaseType::Assign(*this, value); + } + + /*implicit*/ operator T() const { + return Get(); + } + + explicit operator bool() const { + return static_cast<bool>(Get()); + } + + BitStructNumber& operator++() { + *this = Get() + 1u; + return *this; + } + + StorageType operator++(int) { + return Get() + 1u; + } + + BitStructNumber& operator--() { + *this = Get() - 1u; + return *this; + } + + StorageType operator--(int) { + return Get() - 1u; + } + + private: + using BaseType = BitStructField<T, kBitOffset, kBitWidth, /*StorageType*/T>; + using BaseType::Get; +}; + +// Create a BitStruct field which uses the smallest underlying int storage type, +// in order to be large enough to fit (kBitOffset + kBitWidth). +// +// Values are sign-extended when they are read out. +template <size_t kBitOffset, size_t kBitWidth> +using BitStructInt = + BitStructNumber<typename detail::MinimumTypeHelper<int, kBitOffset + kBitWidth>::type, + kBitOffset, + kBitWidth>; + +// Create a BitStruct field which uses the smallest underlying uint storage type, +// in order to be large enough to fit (kBitOffset + kBitWidth). +// +// Values are zero-extended when they are read out. 
+template <size_t kBitOffset, size_t kBitWidth> +using BitStructUint = + BitStructNumber<typename detail::MinimumTypeHelper<unsigned int, kBitOffset + kBitWidth>::type, + kBitOffset, + kBitWidth>; + +// Start a definition for a bitstruct. +// A bitstruct is defined to be a union with a common initial subsequence +// that we call 'DefineBitStructSize<bitwidth>'. +// +// See top of file for usage example. +// +// This marker is required by the C++ standard in order to +// have a "common initial sequence". +// +// See C++ 9.5.1 [class.union]: +// If a standard-layout union contains several standard-layout structs that share a common +// initial sequence ... it is permitted to inspect the common initial sequence of any of +// standard-layout struct members. +#define BITSTRUCT_DEFINE_START(name, bitwidth) \ + union name { \ + art::detail::DefineBitStructSize<(bitwidth)> _; \ + static constexpr size_t BitStructSizeOf() { return (bitwidth); } + +// End the definition of a bitstruct, and insert a sanity check +// to ensure that the bitstruct did not exceed the specified size. +// +// See top of file for usage example. +#define BITSTRUCT_DEFINE_END(name) \ + }; /* NOLINT [readability/braces] [4] */ \ + static_assert(art::detail::ValidateBitStructSize<name>(), \ + #name "bitsize incorrect: " \ + "did you insert extra fields that weren't BitStructX, " \ + "and does the size match the sum of the field widths?") + +// Determine the minimal bit size for a user-defined type T. +// Used by BitStructField to determine how small a custom type is. 
+template <typename T> +static constexpr size_t BitStructSizeOf() { + return T::BitStructSizeOf(); +} + +} // namespace art + +#endif // ART_RUNTIME_BASE_BIT_STRUCT_H_ diff --git a/runtime/base/bit_struct_detail.h b/runtime/base/bit_struct_detail.h new file mode 100644 index 0000000000..9f629c0970 --- /dev/null +++ b/runtime/base/bit_struct_detail.h @@ -0,0 +1,90 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_BASE_BIT_STRUCT_DETAIL_H_ +#define ART_RUNTIME_BASE_BIT_STRUCT_DETAIL_H_ + +#include "bit_utils.h" +#include "globals.h" + +#include <type_traits> + +// Implementation details for bit_struct.h +// Not intended to be used stand-alone. + +namespace art { + +template <typename T> +static constexpr size_t BitStructSizeOf(); + +namespace detail { + // Select the smallest uintX_t that will fit kBitSize bits. + template <size_t kBitSize> + struct MinimumTypeUnsignedHelper { + using type = + typename std::conditional<kBitSize == 0, void, + typename std::conditional<kBitSize <= 8, uint8_t, + typename std::conditional<kBitSize <= 16, uint16_t, + typename std::conditional<kBitSize <= 32, uint32_t, + typename std::conditional<kBitSize <= 64, uint64_t, + typename std::conditional<kBitSize <= BitSizeOf<uintmax_t>(), uintmax_t, + void>::type>::type>::type>::type>::type>::type; + }; + + // Select the smallest [u]intX_t that will fit kBitSize bits. 
+ // Automatically picks intX_t or uintX_t based on the sign-ness of T. + template <typename T, size_t kBitSize> + struct MinimumTypeHelper { + using type_unsigned = typename MinimumTypeUnsignedHelper<kBitSize>::type; + + using type = + typename std::conditional</* if */ std::is_signed<T>::value, + /* then */ typename std::make_signed<type_unsigned>::type, + /* else */ type_unsigned>::type; + }; + + // Ensure the minimal type storage for 'T' matches its declared BitStructSizeOf. + // Nominally used by the BITSTRUCT_DEFINE_END macro. + template <typename T> + static constexpr bool ValidateBitStructSize() { + const size_t kBitStructSizeOf = BitStructSizeOf<T>(); + const size_t kExpectedSize = (BitStructSizeOf<T>() < kBitsPerByte) + ? kBitsPerByte + : RoundUpToPowerOfTwo(kBitStructSizeOf); + + // Ensure no extra fields were added in between START/END. + const size_t kActualSize = sizeof(T) * kBitsPerByte; + return kExpectedSize == kActualSize; + } + + // Denotes the beginning of a bit struct. + // + // This marker is required by the C++ standard in order to + // have a "common initial sequence". + // + // See C++ 9.5.1 [class.union]: + // If a standard-layout union contains several standard-layout structs that share a common + // initial sequence ... it is permitted to inspect the common initial sequence of any of + // standard-layout struct members. + template <size_t kSize> + struct DefineBitStructSize { + private: + typename MinimumTypeUnsignedHelper<kSize>::type _; + }; +} // namespace detail +} // namespace art + +#endif // ART_RUNTIME_BASE_BIT_STRUCT_DETAIL_H_ diff --git a/runtime/base/bit_struct_test.cc b/runtime/base/bit_struct_test.cc new file mode 100644 index 0000000000..872ada324c --- /dev/null +++ b/runtime/base/bit_struct_test.cc @@ -0,0 +1,269 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "bit_struct.h" + +#include "gtest/gtest.h" + +namespace art { + +// A copy of detail::ValidateBitStructSize that uses EXPECT for a more +// human-readable message. +template <typename T> +static constexpr bool ValidateBitStructSize(const char* name) { + const size_t kBitStructSizeOf = BitStructSizeOf<T>(); + const size_t kExpectedSize = (BitStructSizeOf<T>() < kBitsPerByte) + ? kBitsPerByte + : RoundUpToPowerOfTwo(kBitStructSizeOf); + + // Ensure no extra fields were added in between START/END. + const size_t kActualSize = sizeof(T) * kBitsPerByte; + EXPECT_EQ(kExpectedSize, kActualSize) << name; + return true; +} + +#define VALIDATE_BITSTRUCT_SIZE(type) ValidateBitStructSize<type>(#type) + +TEST(BitStructs, MinimumType) { + EXPECT_EQ(1u, sizeof(typename detail::MinimumTypeUnsignedHelper<1>::type)); + EXPECT_EQ(1u, sizeof(typename detail::MinimumTypeUnsignedHelper<2>::type)); + EXPECT_EQ(1u, sizeof(typename detail::MinimumTypeUnsignedHelper<3>::type)); + EXPECT_EQ(1u, sizeof(typename detail::MinimumTypeUnsignedHelper<8>::type)); + EXPECT_EQ(2u, sizeof(typename detail::MinimumTypeUnsignedHelper<9>::type)); + EXPECT_EQ(2u, sizeof(typename detail::MinimumTypeUnsignedHelper<10>::type)); + EXPECT_EQ(2u, sizeof(typename detail::MinimumTypeUnsignedHelper<15>::type)); + EXPECT_EQ(2u, sizeof(typename detail::MinimumTypeUnsignedHelper<16>::type)); + EXPECT_EQ(4u, sizeof(typename detail::MinimumTypeUnsignedHelper<17>::type)); + EXPECT_EQ(4u, sizeof(typename detail::MinimumTypeUnsignedHelper<32>::type)); + EXPECT_EQ(8u, sizeof(typename 
detail::MinimumTypeUnsignedHelper<33>::type)); + EXPECT_EQ(8u, sizeof(typename detail::MinimumTypeUnsignedHelper<64>::type)); +} + +template <typename T> +size_t AsUint(const T& value) { + size_t uint_value = 0; + memcpy(&uint_value, &value, sizeof(value)); + return uint_value; +} + +struct CustomBitStruct { + CustomBitStruct() = default; + explicit CustomBitStruct(int8_t data) : data(data) {} + + static constexpr size_t BitStructSizeOf() { + return 4; + } + + int8_t data; +}; + +template <typename T> +void ZeroInitialize(T& value) { + memset(&value, 0, sizeof(T)); + // TODO: replace with value initialization +} + +TEST(BitStructs, Custom) { + CustomBitStruct expected(0b1111); + + BitStructField<CustomBitStruct, /*lsb*/4, /*width*/4> f; + ZeroInitialize(f); + + EXPECT_EQ(1u, sizeof(f)); + + f = CustomBitStruct(0b1111); + + CustomBitStruct read_out = f; + EXPECT_EQ(read_out.data, 0b1111); + + EXPECT_EQ(AsUint(f), 0b11110000u); +} + +BITSTRUCT_DEFINE_START(TestTwoCustom, /* size */ 8) + BitStructField<CustomBitStruct, /*lsb*/0, /*width*/4> f4_a; + BitStructField<CustomBitStruct, /*lsb*/4, /*width*/4> f4_b; +BITSTRUCT_DEFINE_END(TestTwoCustom); + +TEST(BitStructs, TwoCustom) { + EXPECT_EQ(sizeof(TestTwoCustom), 1u); + + VALIDATE_BITSTRUCT_SIZE(TestTwoCustom); + + TestTwoCustom cst; + ZeroInitialize(cst); + + // Test the write to most-significant field doesn't clobber least-significant. + cst.f4_a = CustomBitStruct(0b0110); + cst.f4_b = CustomBitStruct(0b0101); + + int8_t read_out = static_cast<CustomBitStruct>(cst.f4_a).data; + int8_t read_out_b = static_cast<CustomBitStruct>(cst.f4_b).data; + + EXPECT_EQ(0b0110, static_cast<int>(read_out)); + EXPECT_EQ(0b0101, static_cast<int>(read_out_b)); + + EXPECT_EQ(AsUint(cst), 0b01010110u); + + // Test write to least-significant field doesn't clobber most-significant. 
+ cst.f4_a = CustomBitStruct(0); + + read_out = static_cast<CustomBitStruct>(cst.f4_a).data; + read_out_b = static_cast<CustomBitStruct>(cst.f4_b).data; + + EXPECT_EQ(0b0, static_cast<int>(read_out)); + EXPECT_EQ(0b0101, static_cast<int>(read_out_b)); + + EXPECT_EQ(AsUint(cst), 0b01010000u); +} + +TEST(BitStructs, Number) { + BitStructNumber<uint16_t, /*lsb*/4, /*width*/4> bsn; + ZeroInitialize(bsn); + EXPECT_EQ(2u, sizeof(bsn)); + + bsn = 0b1111; + + uint32_t read_out = static_cast<uint32_t>(bsn); + uint32_t read_out_impl = bsn; + + EXPECT_EQ(read_out, read_out_impl); + EXPECT_EQ(read_out, 0b1111u); + EXPECT_EQ(AsUint(bsn), 0b11110000u); +} + +BITSTRUCT_DEFINE_START(TestBitStruct, /* size */ 8) + BitStructInt</*lsb*/0, /*width*/3> i3; + BitStructUint</*lsb*/3, /*width*/4> u4; + + BitStructUint</*lsb*/0, /*width*/7> alias_all; +BITSTRUCT_DEFINE_END(TestBitStruct); + +TEST(BitStructs, Test1) { + { + // Check minimal size selection is correct. + BitStructInt</*lsb*/0, /*width*/3> i3; + BitStructUint</*lsb*/3, /*width*/4> u4; + + BitStructUint</*lsb*/0, /*width*/7> alias_all; + + EXPECT_EQ(1u, sizeof(i3)); + EXPECT_EQ(1u, sizeof(u4)); + EXPECT_EQ(1u, sizeof(alias_all)); + } + TestBitStruct tst; + ZeroInitialize(tst); + + // Check minimal size selection is correct. + EXPECT_EQ(1u, sizeof(TestBitStruct)); + EXPECT_EQ(1u, sizeof(tst._)); + EXPECT_EQ(1u, sizeof(tst.i3)); + EXPECT_EQ(1u, sizeof(tst.u4)); + EXPECT_EQ(1u, sizeof(tst.alias_all)); + + // Check operator assignment. + tst.i3 = -1; + tst.u4 = 0b1010; + + // Check implicit operator conversion. + int8_t read_i3 = tst.i3; + uint8_t read_u4 = tst.u4; + + // Ensure read-out values were correct. + EXPECT_EQ(static_cast<int8_t>(-1), read_i3); + EXPECT_EQ(0b1010, read_u4); + + // Ensure aliasing is working. + EXPECT_EQ(0b1010111, static_cast<uint8_t>(tst.alias_all)); + + // Ensure the bit pattern is correct. 
+ EXPECT_EQ(0b1010111u, AsUint(tst)); + + // Math operator checks + { + // In-place + ++tst.u4; + EXPECT_EQ(static_cast<uint8_t>(0b1011), static_cast<uint8_t>(tst.u4)); + --tst.u4; + EXPECT_EQ(static_cast<uint8_t>(0b1010), static_cast<uint8_t>(tst.u4)); + + // Copy + uint8_t read_and_convert = tst.u4++; + EXPECT_EQ(static_cast<uint8_t>(0b1011), read_and_convert); + EXPECT_EQ(static_cast<uint8_t>(0b1010), static_cast<uint8_t>(tst.u4)); + read_and_convert = tst.u4--; + EXPECT_EQ(static_cast<uint8_t>(0b1001), read_and_convert); + EXPECT_EQ(static_cast<uint8_t>(0b1010), static_cast<uint8_t>(tst.u4)); + + // Check boolean operator conversion. + tst.u4 = 0b1010; + EXPECT_TRUE(static_cast<bool>(tst.u4)); + bool succ = tst.u4 ? true : false; + EXPECT_TRUE(succ); + + tst.u4 = 0; + EXPECT_FALSE(static_cast<bool>(tst.u4)); + +/* + // Disabled: Overflow is caught by the BitFieldInsert DCHECKs. + // Check overflow for uint. + tst.u4 = 0b1111; + ++tst.u4; + EXPECT_EQ(static_cast<uint8_t>(0), static_cast<uint8_t>(tst.u4)); +*/ + } +} + +BITSTRUCT_DEFINE_START(MixedSizeBitStruct, /* size */ 32) + BitStructUint</*lsb*/0, /*width*/3> u3; + BitStructUint</*lsb*/3, /*width*/10> u10; + BitStructUint</*lsb*/13, /*width*/19> u19; + + BitStructUint</*lsb*/0, /*width*/32> alias_all; +BITSTRUCT_DEFINE_END(MixedSizeBitStruct); + +// static_assert(sizeof(MixedSizeBitStruct) == sizeof(uint32_t), "TestBitStructs#MixedSize"); + +TEST(BitStructs, Mixed) { + EXPECT_EQ(4u, sizeof(MixedSizeBitStruct)); + + MixedSizeBitStruct tst; + ZeroInitialize(tst); + + // Check operator assignment. + tst.u3 = 0b111u; + tst.u10 = 0b1111010100u; + tst.u19 = 0b1010101010101010101u; + + // Check implicit operator conversion. + uint8_t read_u3 = tst.u3; + uint16_t read_u10 = tst.u10; + uint32_t read_u19 = tst.u19; + + // Ensure read-out values were correct. 
+ EXPECT_EQ(0b111u, read_u3); + EXPECT_EQ(0b1111010100u, read_u10); + EXPECT_EQ(0b1010101010101010101u, read_u19); + + uint32_t read_all = tst.alias_all; + + // Ensure aliasing is working. + EXPECT_EQ(0b10101010101010101011111010100111u, read_all); + + // Ensure the bit pattern is correct. + EXPECT_EQ(0b10101010101010101011111010100111u, AsUint(tst)); +} + +} // namespace art diff --git a/runtime/base/bit_utils.h b/runtime/base/bit_utils.h index 87dac0261e..da3c7048b6 100644 --- a/runtime/base/bit_utils.h +++ b/runtime/base/bit_utils.h @@ -371,6 +371,128 @@ inline static uint64_t ReverseBits64(uint64_t opnd) { return opnd; } +// Create a mask for the least significant "bits" +// The returned value is always unsigned to prevent undefined behavior for bitwise ops. +// +// Given 'bits', +// Returns: +// <--- bits ---> +// +-----------------+------------+ +// | 0 ............0 | 1.....1 | +// +-----------------+------------+ +// msb lsb +template <typename T = size_t> +inline static constexpr std::make_unsigned_t<T> MaskLeastSignificant(size_t bits) { + DCHECK_GE(BitSizeOf<T>(), bits) << "Bits out of range for type T"; + using unsigned_T = std::make_unsigned_t<T>; + if (bits >= BitSizeOf<T>()) { + return std::numeric_limits<unsigned_T>::max(); + } else { + return static_cast<unsigned_T>((1 << bits) - 1); + } +} + +// Clears the bitfield starting at the least significant bit "lsb" with a bitwidth of 'width'. +// (Equivalent of ARM BFC instruction). +// +// Given: +// <-- width --> +// +--------+------------+--------+ +// | ABC... | bitfield | XYZ... + +// +--------+------------+--------+ +// lsb 0 +// Returns: +// <-- width --> +// +--------+------------+--------+ +// | ABC... | 0........0 | XYZ... 
+ +// +--------+------------+--------+ +// lsb 0 +template <typename T> +inline static constexpr T BitFieldClear(T value, size_t lsb, size_t width) { + DCHECK_GE(BitSizeOf(value), lsb + width) << "Bit field out of range for value"; + const auto val = static_cast<std::make_unsigned_t<T>>(value); + const auto mask = MaskLeastSignificant<T>(width); + + return static_cast<T>(val & ~(mask << lsb)); +} + +// Inserts the contents of 'data' into bitfield of 'value' starting +// at the least significant bit "lsb" with a bitwidth of 'width'. +// Note: data must be within range of [MinInt(width), MaxInt(width)]. +// (Equivalent of ARM BFI instruction). +// +// Given (data): +// <-- width --> +// +--------+------------+--------+ +// | ABC... | bitfield | XYZ... + +// +--------+------------+--------+ +// lsb 0 +// Returns: +// <-- width --> +// +--------+------------+--------+ +// | ABC... | 0...data | XYZ... + +// +--------+------------+--------+ +// lsb 0 + +template <typename T, typename T2> +inline static constexpr T BitFieldInsert(T value, T2 data, size_t lsb, size_t width) { + DCHECK_GE(BitSizeOf(value), lsb + width) << "Bit field out of range for value"; + if (width != 0u) { + DCHECK_GE(MaxInt<T2>(width), data) << "Data out of range [too large] for bitwidth"; + DCHECK_LE(MinInt<T2>(width), data) << "Data out of range [too small] for bitwidth"; + } else { + DCHECK_EQ(static_cast<T2>(0), data) << "Data out of range [nonzero] for bitwidth 0"; + } + const auto data_mask = MaskLeastSignificant<T2>(width); + const auto value_cleared = BitFieldClear(value, lsb, width); + + return static_cast<T>(value_cleared | ((data & data_mask) << lsb)); +} + +// Extracts the bitfield starting at the least significant bit "lsb" with a bitwidth of 'width'. +// Signed types are sign-extended during extraction. (Equivalent of ARM UBFX/SBFX instruction). 
+// +// Given: +// <-- width --> +// +--------+-------------+-------+ +// | | bitfield | + +// +--------+-------------+-------+ +// lsb 0 +// (Unsigned) Returns: +// <-- width --> +// +----------------+-------------+ +// | 0... 0 | bitfield | +// +----------------+-------------+ +// 0 +// (Signed) Returns: +// <-- width --> +// +----------------+-------------+ +// | S... S | bitfield | +// +----------------+-------------+ +// 0 +// where S is the highest bit in 'bitfield'. +template <typename T> +inline static constexpr T BitFieldExtract(T value, size_t lsb, size_t width) { + DCHECK_GE(BitSizeOf(value), lsb + width) << "Bit field out of range for value"; + const auto val = static_cast<std::make_unsigned_t<T>>(value); + + const T bitfield_unsigned = + static_cast<T>((val >> lsb) & MaskLeastSignificant<T>(width)); + if (std::is_signed<T>::value) { + // Perform sign extension + if (width == 0) { // Avoid underflow. + return static_cast<T>(0); + } else if (bitfield_unsigned & (1 << (width - 1))) { // Detect if sign bit was set. + // MSB <width> LSB + // 0b11111...100...000000 + const auto ones_negmask = ~MaskLeastSignificant<T>(width); + return static_cast<T>(bitfield_unsigned | ones_negmask); + } + } + // Skip sign extension. 
+ return bitfield_unsigned; +} + } // namespace art #endif // ART_RUNTIME_BASE_BIT_UTILS_H_ diff --git a/runtime/base/bit_utils_test.cc b/runtime/base/bit_utils_test.cc index c96c6dc933..0276d8ded2 100644 --- a/runtime/base/bit_utils_test.cc +++ b/runtime/base/bit_utils_test.cc @@ -345,6 +345,97 @@ static_assert(IsAbsoluteUint<32, int64_t>(std::numeric_limits<uint32_t>::max()), "TestIsAbsoluteUint64#27"); static_assert(!IsAbsoluteUint<32, int64_t>(kUint32MaxPlus1), "TestIsAbsoluteUint64#28"); +static_assert(MaskLeastSignificant(0) == 0b0, "TestMaskLeastSignificant#1"); +static_assert(MaskLeastSignificant(1) == 0b1, "TestMaskLeastSignificant#2"); +static_assert(MaskLeastSignificant(2) == 0b11, "TestMaskLeastSignificant#3"); +static_assert(MaskLeastSignificant<uint8_t>(8) == 0xFF, "TestMaskLeastSignificant#4"); +static_assert(MaskLeastSignificant<int8_t>(8) == 0xFF, "TestMaskLeastSignificant#5"); + +static_assert(BitFieldClear(0xFF, /*lsb*/0, /*width*/0) == 0xFF, "TestBitFieldClear#1"); +static_assert(BitFieldClear(std::numeric_limits<uint32_t>::max(), /*lsb*/0, /*width*/32) == 0x0, + "TestBitFieldClear#2"); +static_assert(BitFieldClear(std::numeric_limits<int32_t>::max(), /*lsb*/0, /*width*/32) == 0x0, + "TestBitFieldClear#3"); +static_assert(BitFieldClear(0xFF, /*lsb*/0, /*width*/2) == 0b11111100, "TestBitFieldClear#4"); +static_assert(BitFieldClear(0xFF, /*lsb*/0, /*width*/3) == 0b11111000, "TestBitFieldClear#5"); +static_assert(BitFieldClear(0xFF, /*lsb*/1, /*width*/3) == 0b11110001, "TestBitFieldClear#6"); +static_assert(BitFieldClear(0xFF, /*lsb*/2, /*width*/3) == 0b11100011, "TestBitFieldClear#7"); + +static_assert(BitFieldExtract(0xFF, /*lsb*/0, /*width*/0) == 0x0, "TestBitFieldExtract#1"); +static_assert(BitFieldExtract(std::numeric_limits<uint32_t>::max(), /*lsb*/0, /*width*/32) + == std::numeric_limits<uint32_t>::max(), + "TestBitFieldExtract#2"); +static_assert(BitFieldExtract(std::numeric_limits<int32_t>::max(), /*lsb*/0, /*width*/32) + == 
std::numeric_limits<int32_t>::max(), + "TestBitFieldExtract#3"); +static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/0, /*width*/2) == 0b00000011, + "TestBitFieldExtract#4"); +static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/0, /*width*/3) == 0b00000111, + "TestBitFieldExtract#5"); +static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/1, /*width*/3) == 0b00000111, + "TestBitFieldExtract#6"); +static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/2, /*width*/3) == 0b00000111, + "TestBitFieldExtract#7"); +static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/3, /*width*/3) == 0b00000111, + "TestBitFieldExtract#8"); +static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/8, /*width*/3) == 0b00000000, + "TestBitFieldExtract#9"); +static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/7, /*width*/3) == 0b00000001, + "TestBitFieldExtract#10"); +static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/6, /*width*/3) == 0b00000011, + "TestBitFieldExtract#11"); +static_assert(BitFieldExtract(0xFF, /*lsb*/0, /*width*/2) == -1, "TestBitFieldExtract#12"); +static_assert(BitFieldExtract(0xFF, /*lsb*/0, /*width*/3) == -1, "TestBitFieldExtract#13"); +static_assert(BitFieldExtract(0xFF, /*lsb*/1, /*width*/3) == -1, "TestBitFieldExtract#14"); +static_assert(BitFieldExtract(0xFF, /*lsb*/2, /*width*/3) == -1, "TestBitFieldExtract#15"); +static_assert(BitFieldExtract(0xFF, /*lsb*/3, /*width*/3) == -1, "TestBitFieldExtract#16"); +static_assert(BitFieldExtract(0xFF, /*lsb*/8, /*width*/3) == 0b00000000, "TestBitFieldExtract#17"); +static_assert(BitFieldExtract(0xFF, /*lsb*/7, /*width*/3) == 0b00000001, "TestBitFieldExtract#18"); +static_assert(BitFieldExtract(0xFF, /*lsb*/6, /*width*/3) == 0b00000011, "TestBitFieldExtract#19"); +static_assert(BitFieldExtract(static_cast<uint8_t>(0b01101010), /*lsb*/2, /*width*/4) + == 0b00001010, + "TestBitFieldExtract#20"); 
+static_assert(BitFieldExtract(static_cast<int8_t>(0b01101010), /*lsb*/2, /*width*/4) + == static_cast<int8_t>(0b11111010), + "TestBitFieldExtract#21"); + +static_assert(BitFieldInsert(0xFF, /*data*/0x0, /*lsb*/0, /*width*/0) == 0xFF, + "TestBitFieldInsert#1"); +static_assert(BitFieldInsert(std::numeric_limits<uint32_t>::max(), + /*data*/std::numeric_limits<uint32_t>::max(), + /*lsb*/0, + /*width*/32) + == std::numeric_limits<uint32_t>::max(), + "TestBitFieldInsert#2"); +static_assert(BitFieldInsert(std::numeric_limits<int32_t>::max(), + /*data*/std::numeric_limits<uint32_t>::max(), + /*lsb*/0, + /*width*/32) + == std::numeric_limits<uint32_t>::max(), + "TestBitFieldInsert#3"); +static_assert(BitFieldInsert(0u, + /*data*/std::numeric_limits<uint32_t>::max(), + /*lsb*/0, + /*width*/32) + == std::numeric_limits<uint32_t>::max(), + "TestBitFieldInsert#4"); +static_assert(BitFieldInsert(-(-0), + /*data*/std::numeric_limits<uint32_t>::max(), + /*lsb*/0, + /*width*/32) + == std::numeric_limits<uint32_t>::max(), + "TestBitFieldInsert#5"); +static_assert(BitFieldInsert(0x00, /*data*/0b11u, /*lsb*/0, /*width*/2) == 0b00000011, + "TestBitFieldInsert#6"); +static_assert(BitFieldInsert(0x00, /*data*/0b111u, /*lsb*/0, /*width*/3) == 0b00000111, + "TestBitFieldInsert#7"); +static_assert(BitFieldInsert(0x00, /*data*/0b111u, /*lsb*/1, /*width*/3) == 0b00001110, + "TestBitFieldInsert#8"); +static_assert(BitFieldInsert(0x00, /*data*/0b111u, /*lsb*/2, /*width*/3) == 0b00011100, + "TestBitFieldInsert#9"); +static_assert(BitFieldInsert(0b01011100, /*data*/0b1101u, /*lsb*/4, /*width*/4) == 0b11011100, + "TestBitFieldInsert#10"); + template <typename Container> void CheckElements(const std::initializer_list<uint32_t>& expected, const Container& elements) { auto expected_it = expected.begin(); diff --git a/runtime/base/file_magic.cc b/runtime/base/file_magic.cc index 568a7ae5d6..30b4f0559d 100644 --- a/runtime/base/file_magic.cc +++ b/runtime/base/file_magic.cc @@ -55,8 +55,4 @@ bool 
IsZipMagic(uint32_t magic) { ('K' == ((magic >> 8) & 0xff))); } -bool IsDexMagic(uint32_t magic) { - return DexFile::IsMagicValid(reinterpret_cast<const uint8_t*>(&magic)); -} - } // namespace art diff --git a/runtime/base/file_magic.h b/runtime/base/file_magic.h index 4b5d2f5a48..1c9effdb50 100644 --- a/runtime/base/file_magic.h +++ b/runtime/base/file_magic.h @@ -29,7 +29,6 @@ File OpenAndReadMagic(const char* filename, uint32_t* magic, std::string* error_ // Check whether the given magic matches a known file type. bool IsZipMagic(uint32_t magic); -bool IsDexMagic(uint32_t magic); } // namespace art diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h index c472a9ee03..bc25b363b8 100644 --- a/runtime/base/hash_set.h +++ b/runtime/base/hash_set.h @@ -22,6 +22,7 @@ #include <functional> #include <iterator> #include <memory> +#include <type_traits> #include <utility> #include "bit_utils.h" @@ -385,18 +386,20 @@ class HashSet { } // Insert an element, allows duplicates. - void Insert(const T& element) { - InsertWithHash(element, hashfn_(element)); + template <typename U, typename = typename std::enable_if<std::is_convertible<U, T>::value>::type> + void Insert(U&& element) { + InsertWithHash(std::forward<U>(element), hashfn_(element)); } - void InsertWithHash(const T& element, size_t hash) { + template <typename U, typename = typename std::enable_if<std::is_convertible<U, T>::value>::type> + void InsertWithHash(U&& element, size_t hash) { DCHECK_EQ(hash, hashfn_(element)); if (num_elements_ >= elements_until_expand_) { Expand(); DCHECK_LT(num_elements_, elements_until_expand_); } const size_t index = FirstAvailableSlot(IndexForHash(hash)); - data_[index] = element; + data_[index] = std::forward<U>(element); ++num_elements_; } diff --git a/runtime/base/scoped_arena_allocator.h b/runtime/base/scoped_arena_allocator.h index 1a0eb5ea07..f156f526fc 100644 --- a/runtime/base/scoped_arena_allocator.h +++ b/runtime/base/scoped_arena_allocator.h @@ -145,6 +145,10 @@ 
class ScopedArenaAllocator explicit ScopedArenaAllocator(ArenaStack* arena_stack); ~ScopedArenaAllocator(); + ArenaStack* GetArenaStack() const { + return arena_stack_; + } + void Reset(); void* Alloc(size_t bytes, ArenaAllocKind kind = kArenaAllocMisc) ALWAYS_INLINE { diff --git a/runtime/base/scoped_arena_containers.h b/runtime/base/scoped_arena_containers.h index 4a6c9076af..fccaaeaa42 100644 --- a/runtime/base/scoped_arena_containers.h +++ b/runtime/base/scoped_arena_containers.h @@ -52,17 +52,40 @@ template <typename T> using ScopedArenaVector = dchecked_vector<T, ScopedArenaAllocatorAdapter<T>>; template <typename T, typename Comparator = std::less<T>> +using ScopedArenaPriorityQueue = std::priority_queue<T, ScopedArenaVector<T>, Comparator>; + +template <typename T> +using ScopedArenaStdStack = std::stack<T, ScopedArenaDeque<T>>; + +template <typename T, typename Comparator = std::less<T>> using ScopedArenaSet = std::set<T, Comparator, ScopedArenaAllocatorAdapter<T>>; template <typename K, typename V, typename Comparator = std::less<K>> using ScopedArenaSafeMap = SafeMap<K, V, Comparator, ScopedArenaAllocatorAdapter<std::pair<const K, V>>>; +template <typename T, + typename EmptyFn = DefaultEmptyFn<T>, + typename HashFn = std::hash<T>, + typename Pred = std::equal_to<T>> +using ScopedArenaHashSet = HashSet<T, EmptyFn, HashFn, Pred, ScopedArenaAllocatorAdapter<T>>; + +template <typename Key, + typename Value, + typename EmptyFn = DefaultEmptyFn<std::pair<Key, Value>>, + typename HashFn = std::hash<Key>, + typename Pred = std::equal_to<Key>> +using ScopedArenaHashMap = HashMap<Key, + Value, + EmptyFn, + HashFn, + Pred, + ScopedArenaAllocatorAdapter<std::pair<Key, Value>>>; + template <typename K, typename V, class Hash = std::hash<K>, class KeyEqual = std::equal_to<K>> using ScopedArenaUnorderedMap = std::unordered_map<K, V, Hash, KeyEqual, ScopedArenaAllocatorAdapter<std::pair<const K, V>>>; - // Implementation details below. 
template <> @@ -79,12 +102,12 @@ class ScopedArenaAllocatorAdapter<void> typedef ScopedArenaAllocatorAdapter<U> other; }; - explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* arena_allocator, + explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* allocator, ArenaAllocKind kind = kArenaAllocSTL) - : DebugStackReference(arena_allocator), - DebugStackIndirectTopRef(arena_allocator), + : DebugStackReference(allocator), + DebugStackIndirectTopRef(allocator), ArenaAllocatorAdapterKind(kind), - arena_stack_(arena_allocator->arena_stack_) { + arena_stack_(allocator->arena_stack_) { } template <typename U> ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other) // NOLINT, implicit @@ -122,12 +145,12 @@ class ScopedArenaAllocatorAdapter typedef ScopedArenaAllocatorAdapter<U> other; }; - explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* arena_allocator, + explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* allocator, ArenaAllocKind kind = kArenaAllocSTL) - : DebugStackReference(arena_allocator), - DebugStackIndirectTopRef(arena_allocator), + : DebugStackReference(allocator), + DebugStackIndirectTopRef(allocator), ArenaAllocatorAdapterKind(kind), - arena_stack_(arena_allocator->arena_stack_) { + arena_stack_(allocator->arena_stack_) { } template <typename U> ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other) // NOLINT, implicit diff --git a/runtime/base/variant_map.h b/runtime/base/variant_map.h index d87df8710c..71a1018dea 100644 --- a/runtime/base/variant_map.h +++ b/runtime/base/variant_map.h @@ -237,6 +237,14 @@ struct VariantMap { return (ptr == nullptr) ? 
key.CreateDefaultValue() : *ptr; } + template <typename T, typename U> + void AssignIfExists(const TKey<T>& key, U* out) { + DCHECK(out != nullptr); + if (Exists(key)) { + *out = std::move(*Get(key)); + } + } + private: // TODO: move to detail, or make it more generic like a ScopeGuard(function) template <typename TValue> diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index 8999e17cf1..fe91272ef7 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -51,6 +51,7 @@ #include "compiler_callbacks.h" #include "debugger.h" #include "dex_file-inl.h" +#include "dex_file_loader.h" #include "entrypoints/entrypoint_utils.h" #include "entrypoints/runtime_asm_entrypoints.h" #include "experimental_flags.h" @@ -2866,6 +2867,11 @@ bool ClassLinker::ShouldUseInterpreterEntrypoint(ArtMethod* method, const void* return true; } + if (Thread::Current()->IsAsyncExceptionPending()) { + // Force use of interpreter to handle async-exceptions + return true; + } + if (runtime->IsJavaDebuggable()) { // For simplicity, we ignore precompiled code and go to the interpreter // assuming we don't already have jitted code. 
@@ -8709,10 +8715,11 @@ class GetResolvedClassesVisitor : public ClassVisitor { const DexFile& dex_file = klass->GetDexFile(); if (&dex_file != last_dex_file_) { last_dex_file_ = &dex_file; - DexCacheResolvedClasses resolved_classes(dex_file.GetLocation(), - dex_file.GetBaseLocation(), - dex_file.GetLocationChecksum(), - dex_file.NumMethodIds()); + DexCacheResolvedClasses resolved_classes( + dex_file.GetLocation(), + DexFileLoader::GetBaseLocation(dex_file.GetLocation()), + dex_file.GetLocationChecksum(), + dex_file.NumMethodIds()); last_resolved_classes_ = result_->find(resolved_classes); if (last_resolved_classes_ == result_->end()) { last_resolved_classes_ = result_->insert(resolved_classes).first; diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc index f887b8ed42..3d9fd59e0b 100644 --- a/runtime/class_linker_test.cc +++ b/runtime/class_linker_test.cc @@ -26,7 +26,7 @@ #include "base/enums.h" #include "class_linker-inl.h" #include "common_runtime_test.h" -#include "dex_file.h" +#include "native_dex_file.h" #include "dex_file_types.h" #include "entrypoints/entrypoint_utils-inl.h" #include "experimental_flags.h" @@ -1462,11 +1462,11 @@ TEST_F(ClassLinkerTest, RegisterDexFileName) { dex_cache->SetLocation(location.Get()); const DexFile* old_dex_file = dex_cache->GetDexFile(); - std::unique_ptr<DexFile> dex_file(new DexFile(old_dex_file->Begin(), - old_dex_file->Size(), - location->ToModifiedUtf8(), - 0u, - nullptr)); + std::unique_ptr<DexFile> dex_file(new NativeDexFile(old_dex_file->Begin(), + old_dex_file->Size(), + location->ToModifiedUtf8(), + 0u, + nullptr)); { WriterMutexLock mu(soa.Self(), *Locks::dex_lock_); // Check that inserting with a UTF16 name works. 
diff --git a/runtime/class_loader_context.cc b/runtime/class_loader_context.cc index 2282da048f..167533d68a 100644 --- a/runtime/class_loader_context.cc +++ b/runtime/class_loader_context.cc @@ -25,6 +25,7 @@ #include "class_linker.h" #include "class_loader_utils.h" #include "dex_file.h" +#include "dex_file_loader.h" #include "handle_scope-inl.h" #include "jni_internal.h" #include "oat_file_assistant.h" @@ -227,11 +228,11 @@ bool ClassLoaderContext::OpenDexFiles(InstructionSet isa, const std::string& cla std::string error_msg; // When opening the dex files from the context we expect their checksum to match their // contents. So pass true to verify_checksum. - if (!DexFile::Open(location.c_str(), - location.c_str(), - /*verify_checksum*/ true, - &error_msg, - &info.opened_dex_files)) { + if (!DexFileLoader::Open(location.c_str(), + location.c_str(), + /*verify_checksum*/ true, + &error_msg, + &info.opened_dex_files)) { // If we fail to open the dex file because it's been stripped, try to open the dex file // from its corresponding oat file. // This could happen when we need to recompile a pre-build whose dex code has been stripped. 
@@ -282,7 +283,7 @@ bool ClassLoaderContext::RemoveLocationsFromClassPaths( std::set<std::string> canonical_locations; for (const std::string& location : locations) { - canonical_locations.insert(DexFile::GetDexCanonicalLocation(location.c_str())); + canonical_locations.insert(DexFileLoader::GetDexCanonicalLocation(location.c_str())); } bool removed_locations = false; for (ClassLoaderInfo& info : class_loader_chain_) { @@ -292,7 +293,7 @@ bool ClassLoaderContext::RemoveLocationsFromClassPaths( info.classpath.end(), [canonical_locations](const std::string& location) { return ContainsElement(canonical_locations, - DexFile::GetDexCanonicalLocation(location.c_str())); + DexFileLoader::GetDexCanonicalLocation(location.c_str())); }); info.classpath.erase(kept_it, info.classpath.end()); if (initial_size != info.classpath.size()) { @@ -340,7 +341,8 @@ std::string ClassLoaderContext::EncodeContext(const std::string& base_dir, if (for_dex2oat) { // dex2oat only needs the base location. It cannot accept multidex locations. // So ensure we only add each file once. 
- bool new_insert = seen_locations.insert(dex_file->GetBaseLocation()).second; + bool new_insert = seen_locations.insert( + DexFileLoader::GetBaseLocation(dex_file->GetLocation())).second; if (!new_insert) { continue; } diff --git a/runtime/class_loader_context_test.cc b/runtime/class_loader_context_test.cc index ae3dcecb4a..be6acde4a9 100644 --- a/runtime/class_loader_context_test.cc +++ b/runtime/class_loader_context_test.cc @@ -100,12 +100,13 @@ class ClassLoaderContextTest : public CommonRuntimeTest { info.opened_dex_files[cur_open_dex_index++]; std::unique_ptr<const DexFile>& expected_dex_file = (*all_dex_files)[k]; - std::string expected_location = expected_dex_file->GetBaseLocation(); + std::string expected_location = + DexFileLoader::GetBaseLocation(expected_dex_file->GetLocation()); UniqueCPtr<const char[]> expected_real_location( realpath(expected_location.c_str(), nullptr)); ASSERT_TRUE(expected_real_location != nullptr) << expected_location; expected_location.assign(expected_real_location.get()); - expected_location += DexFile::GetMultiDexSuffix(expected_dex_file->GetLocation()); + expected_location += DexFileLoader::GetMultiDexSuffix(expected_dex_file->GetLocation()); ASSERT_EQ(expected_location, opened_dex_file->GetLocation()); ASSERT_EQ(expected_dex_file->GetLocationChecksum(), opened_dex_file->GetLocationChecksum()); diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc index 29b376a21c..0c2e49010e 100644 --- a/runtime/common_runtime_test.cc +++ b/runtime/common_runtime_test.cc @@ -34,6 +34,7 @@ #include "class_linker.h" #include "compiler_callbacks.h" #include "dex_file-inl.h" +#include "dex_file_loader.h" #include "gc/heap.h" #include "gc_root-inl.h" #include "gtest/gtest.h" @@ -372,7 +373,7 @@ std::unique_ptr<const DexFile> CommonRuntimeTestImpl::LoadExpectSingleDexFile( std::string error_msg; MemMap::Init(); static constexpr bool kVerifyChecksum = true; - if (!DexFile::Open(location, location, kVerifyChecksum, &error_msg, 
&dex_files)) { + if (!DexFileLoader::Open(location, location, kVerifyChecksum, &error_msg, &dex_files)) { LOG(FATAL) << "Could not open .dex file '" << location << "': " << error_msg << "\n"; UNREACHABLE(); } else { @@ -571,7 +572,7 @@ std::vector<std::unique_ptr<const DexFile>> CommonRuntimeTestImpl::OpenTestDexFi static constexpr bool kVerifyChecksum = true; std::string error_msg; std::vector<std::unique_ptr<const DexFile>> dex_files; - bool success = DexFile::Open( + bool success = DexFileLoader::Open( filename.c_str(), filename.c_str(), kVerifyChecksum, &error_msg, &dex_files); CHECK(success) << "Failed to open '" << filename << "': " << error_msg; for (auto& dex_file : dex_files) { diff --git a/runtime/dex2oat_environment_test.h b/runtime/dex2oat_environment_test.h index 93daa45519..a9bb95480e 100644 --- a/runtime/dex2oat_environment_test.h +++ b/runtime/dex2oat_environment_test.h @@ -26,6 +26,7 @@ #include "base/stl_util.h" #include "common_runtime_test.h" #include "compiler_callbacks.h" +#include "dex_file_loader.h" #include "exec_utils.h" #include "gc/heap.h" #include "gc/space/image_space.h" @@ -71,7 +72,8 @@ class Dex2oatEnvironmentTest : public CommonRuntimeTest { << "Expected dex file to be at: " << GetDexSrc1(); ASSERT_TRUE(OS::FileExists(GetStrippedDexSrc1().c_str())) << "Expected stripped dex file to be at: " << GetStrippedDexSrc1(); - ASSERT_FALSE(DexFile::GetMultiDexChecksums(GetStrippedDexSrc1().c_str(), &checksums, &error_msg)) + ASSERT_FALSE( + DexFileLoader::GetMultiDexChecksums(GetStrippedDexSrc1().c_str(), &checksums, &error_msg)) << "Expected stripped dex file to be stripped: " << GetStrippedDexSrc1(); ASSERT_TRUE(OS::FileExists(GetDexSrc2().c_str())) << "Expected dex file to be at: " << GetDexSrc2(); @@ -80,13 +82,19 @@ class Dex2oatEnvironmentTest : public CommonRuntimeTest { // GetMultiDexSrc1, but a different secondary dex checksum. 
static constexpr bool kVerifyChecksum = true; std::vector<std::unique_ptr<const DexFile>> multi1; - ASSERT_TRUE(DexFile::Open(GetMultiDexSrc1().c_str(), - GetMultiDexSrc1().c_str(), kVerifyChecksum, &error_msg, &multi1)) << error_msg; + ASSERT_TRUE(DexFileLoader::Open(GetMultiDexSrc1().c_str(), + GetMultiDexSrc1().c_str(), + kVerifyChecksum, + &error_msg, + &multi1)) << error_msg; ASSERT_GT(multi1.size(), 1u); std::vector<std::unique_ptr<const DexFile>> multi2; - ASSERT_TRUE(DexFile::Open(GetMultiDexSrc2().c_str(), - GetMultiDexSrc2().c_str(), kVerifyChecksum, &error_msg, &multi2)) << error_msg; + ASSERT_TRUE(DexFileLoader::Open(GetMultiDexSrc2().c_str(), + GetMultiDexSrc2().c_str(), + kVerifyChecksum, + &error_msg, + &multi2)) << error_msg; ASSERT_GT(multi2.size(), 1u); ASSERT_EQ(multi1[0]->GetLocationChecksum(), multi2[0]->GetLocationChecksum()); diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc index 2e776b0e61..f6b3428208 100644 --- a/runtime/dex_file.cc +++ b/runtime/dex_file.cc @@ -23,7 +23,6 @@ #include <string.h> #include <sys/file.h> #include <sys/mman.h> // For the PROT_* and MAP_* constants. 
-#include <sys/stat.h> #include <zlib.h> #include <memory> @@ -33,19 +32,17 @@ #include "android-base/stringprintf.h" #include "base/enums.h" -#include "base/file_magic.h" #include "base/logging.h" #include "base/stl_util.h" -#include "base/systrace.h" -#include "base/unix_file/fd_file.h" #include "dex_file-inl.h" -#include "dex_file_verifier.h" +#include "dex_file_loader.h" #include "jvalue.h" #include "leb128.h" +#include "mem_map.h" +#include "native_dex_file.h" #include "os.h" #include "utf-inl.h" #include "utils.h" -#include "zip_archive.h" namespace art { @@ -56,22 +53,6 @@ static_assert(std::is_trivially_copyable<dex::StringIndex>::value, "StringIndex static_assert(sizeof(dex::TypeIndex) == sizeof(uint16_t), "TypeIndex size is wrong"); static_assert(std::is_trivially_copyable<dex::TypeIndex>::value, "TypeIndex not trivial"); -static constexpr OatDexFile* kNoOatDexFile = nullptr; - -const char* DexFile::kClassesDex = "classes.dex"; - -const uint8_t DexFile::kDexMagic[] = { 'd', 'e', 'x', '\n' }; -const uint8_t DexFile::kDexMagicVersions[DexFile::kNumDexVersions][DexFile::kDexVersionLen] = { - {'0', '3', '5', '\0'}, - // Dex version 036 skipped because of an old dalvik bug on some versions of android where dex - // files with that version number would erroneously be accepted and run. - {'0', '3', '7', '\0'}, - // Dex version 038: Android "O". - {'0', '3', '8', '\0'}, - // Dex verion 039: Beyond Android "O". 
- {'0', '3', '9', '\0'}, -}; - uint32_t DexFile::CalculateChecksum() const { const uint32_t non_sum = OFFSETOF_MEMBER(DexFile::Header, signature_); const uint8_t* non_sum_ptr = Begin() + non_sum; @@ -83,55 +64,6 @@ struct DexFile::AnnotationValue { uint8_t type_; }; -bool DexFile::GetMultiDexChecksums(const char* filename, - std::vector<uint32_t>* checksums, - std::string* error_msg) { - CHECK(checksums != nullptr); - uint32_t magic; - - File fd = OpenAndReadMagic(filename, &magic, error_msg); - if (fd.Fd() == -1) { - DCHECK(!error_msg->empty()); - return false; - } - if (IsZipMagic(magic)) { - std::unique_ptr<ZipArchive> zip_archive( - ZipArchive::OpenFromFd(fd.Release(), filename, error_msg)); - if (zip_archive.get() == nullptr) { - *error_msg = StringPrintf("Failed to open zip archive '%s' (error msg: %s)", filename, - error_msg->c_str()); - return false; - } - - uint32_t i = 0; - std::string zip_entry_name = GetMultiDexClassesDexName(i++); - std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(zip_entry_name.c_str(), error_msg)); - if (zip_entry.get() == nullptr) { - *error_msg = StringPrintf("Zip archive '%s' doesn't contain %s (error msg: %s)", filename, - zip_entry_name.c_str(), error_msg->c_str()); - return false; - } - - do { - checksums->push_back(zip_entry->GetCrc32()); - zip_entry_name = DexFile::GetMultiDexClassesDexName(i++); - zip_entry.reset(zip_archive->Find(zip_entry_name.c_str(), error_msg)); - } while (zip_entry.get() != nullptr); - return true; - } - if (IsDexMagic(magic)) { - std::unique_ptr<const DexFile> dex_file( - DexFile::OpenFile(fd.Release(), filename, false, false, error_msg)); - if (dex_file.get() == nullptr) { - return false; - } - checksums->push_back(dex_file->GetHeader().checksum_); - return true; - } - *error_msg = StringPrintf("Expected valid zip or dex file: '%s'", filename); - return false; -} - int DexFile::GetPermissions() const { if (mem_map_.get() == nullptr) { return 0; @@ -162,367 +94,6 @@ bool DexFile::DisableWrite() 
const { } } - -std::unique_ptr<const DexFile> DexFile::Open(const uint8_t* base, - size_t size, - const std::string& location, - uint32_t location_checksum, - const OatDexFile* oat_dex_file, - bool verify, - bool verify_checksum, - std::string* error_msg) { - ScopedTrace trace(std::string("Open dex file from RAM ") + location); - return OpenCommon(base, - size, - location, - location_checksum, - oat_dex_file, - verify, - verify_checksum, - error_msg); -} - -std::unique_ptr<const DexFile> DexFile::Open(const std::string& location, - uint32_t location_checksum, - std::unique_ptr<MemMap> map, - bool verify, - bool verify_checksum, - std::string* error_msg) { - ScopedTrace trace(std::string("Open dex file from mapped-memory ") + location); - CHECK(map.get() != nullptr); - - if (map->Size() < sizeof(DexFile::Header)) { - *error_msg = StringPrintf( - "DexFile: failed to open dex file '%s' that is too short to have a header", - location.c_str()); - return nullptr; - } - - std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(), - map->Size(), - location, - location_checksum, - kNoOatDexFile, - verify, - verify_checksum, - error_msg); - if (dex_file != nullptr) { - dex_file->mem_map_ = std::move(map); - } - return dex_file; -} - -bool DexFile::Open(const char* filename, - const std::string& location, - bool verify_checksum, - std::string* error_msg, - std::vector<std::unique_ptr<const DexFile>>* dex_files) { - ScopedTrace trace(std::string("Open dex file ") + std::string(location)); - DCHECK(dex_files != nullptr) << "DexFile::Open: out-param is nullptr"; - uint32_t magic; - File fd = OpenAndReadMagic(filename, &magic, error_msg); - if (fd.Fd() == -1) { - DCHECK(!error_msg->empty()); - return false; - } - if (IsZipMagic(magic)) { - return DexFile::OpenZip(fd.Release(), location, verify_checksum, error_msg, dex_files); - } - if (IsDexMagic(magic)) { - std::unique_ptr<const DexFile> dex_file(DexFile::OpenFile(fd.Release(), - location, - /* verify */ true, - 
verify_checksum, - error_msg)); - if (dex_file.get() != nullptr) { - dex_files->push_back(std::move(dex_file)); - return true; - } else { - return false; - } - } - *error_msg = StringPrintf("Expected valid zip or dex file: '%s'", filename); - return false; -} - -std::unique_ptr<const DexFile> DexFile::OpenDex(int fd, - const std::string& location, - bool verify_checksum, - std::string* error_msg) { - ScopedTrace trace("Open dex file " + std::string(location)); - return OpenFile(fd, location, true /* verify */, verify_checksum, error_msg); -} - -bool DexFile::OpenZip(int fd, - const std::string& location, - bool verify_checksum, - std::string* error_msg, - std::vector<std::unique_ptr<const DexFile>>* dex_files) { - ScopedTrace trace("Dex file open Zip " + std::string(location)); - DCHECK(dex_files != nullptr) << "DexFile::OpenZip: out-param is nullptr"; - std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd, location.c_str(), error_msg)); - if (zip_archive.get() == nullptr) { - DCHECK(!error_msg->empty()); - return false; - } - return DexFile::OpenAllDexFilesFromZip(*zip_archive, - location, - verify_checksum, - error_msg, - dex_files); -} - -std::unique_ptr<const DexFile> DexFile::OpenFile(int fd, - const std::string& location, - bool verify, - bool verify_checksum, - std::string* error_msg) { - ScopedTrace trace(std::string("Open dex file ") + std::string(location)); - CHECK(!location.empty()); - std::unique_ptr<MemMap> map; - { - File delayed_close(fd, /* check_usage */ false); - struct stat sbuf; - memset(&sbuf, 0, sizeof(sbuf)); - if (fstat(fd, &sbuf) == -1) { - *error_msg = StringPrintf("DexFile: fstat '%s' failed: %s", location.c_str(), - strerror(errno)); - return nullptr; - } - if (S_ISDIR(sbuf.st_mode)) { - *error_msg = StringPrintf("Attempt to mmap directory '%s'", location.c_str()); - return nullptr; - } - size_t length = sbuf.st_size; - map.reset(MemMap::MapFile(length, - PROT_READ, - MAP_PRIVATE, - fd, - 0, - /*low_4gb*/false, - 
location.c_str(), - error_msg)); - if (map == nullptr) { - DCHECK(!error_msg->empty()); - return nullptr; - } - } - - if (map->Size() < sizeof(DexFile::Header)) { - *error_msg = StringPrintf( - "DexFile: failed to open dex file '%s' that is too short to have a header", - location.c_str()); - return nullptr; - } - - const Header* dex_header = reinterpret_cast<const Header*>(map->Begin()); - - std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(), - map->Size(), - location, - dex_header->checksum_, - kNoOatDexFile, - verify, - verify_checksum, - error_msg); - if (dex_file != nullptr) { - dex_file->mem_map_ = std::move(map); - } - - return dex_file; -} - -std::unique_ptr<const DexFile> DexFile::OpenOneDexFileFromZip(const ZipArchive& zip_archive, - const char* entry_name, - const std::string& location, - bool verify_checksum, - std::string* error_msg, - ZipOpenErrorCode* error_code) { - ScopedTrace trace("Dex file open from Zip Archive " + std::string(location)); - CHECK(!location.empty()); - std::unique_ptr<ZipEntry> zip_entry(zip_archive.Find(entry_name, error_msg)); - if (zip_entry == nullptr) { - *error_code = ZipOpenErrorCode::kEntryNotFound; - return nullptr; - } - if (zip_entry->GetUncompressedLength() == 0) { - *error_msg = StringPrintf("Dex file '%s' has zero length", location.c_str()); - *error_code = ZipOpenErrorCode::kDexFileError; - return nullptr; - } - - std::unique_ptr<MemMap> map; - if (zip_entry->IsUncompressed()) { - if (!zip_entry->IsAlignedTo(alignof(Header))) { - // Do not mmap unaligned ZIP entries because - // doing so would fail dex verification which requires 4 byte alignment. - LOG(WARNING) << "Can't mmap dex file " << location << "!" << entry_name << " directly; " - << "please zipalign to " << alignof(Header) << " bytes. " - << "Falling back to extracting file."; - } else { - // Map uncompressed files within zip as file-backed to avoid a dirty copy. 
- map.reset(zip_entry->MapDirectlyFromFile(location.c_str(), /*out*/error_msg)); - if (map == nullptr) { - LOG(WARNING) << "Can't mmap dex file " << location << "!" << entry_name << " directly; " - << "is your ZIP file corrupted? Falling back to extraction."; - // Try again with Extraction which still has a chance of recovery. - } - } - } - - if (map == nullptr) { - // Default path for compressed ZIP entries, - // and fallback for stored ZIP entries. - map.reset(zip_entry->ExtractToMemMap(location.c_str(), entry_name, error_msg)); - } - - if (map == nullptr) { - *error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", entry_name, location.c_str(), - error_msg->c_str()); - *error_code = ZipOpenErrorCode::kExtractToMemoryError; - return nullptr; - } - VerifyResult verify_result; - std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(), - map->Size(), - location, - zip_entry->GetCrc32(), - kNoOatDexFile, - /* verify */ true, - verify_checksum, - error_msg, - &verify_result); - if (dex_file == nullptr) { - if (verify_result == VerifyResult::kVerifyNotAttempted) { - *error_code = ZipOpenErrorCode::kDexFileError; - } else { - *error_code = ZipOpenErrorCode::kVerifyError; - } - return nullptr; - } - dex_file->mem_map_ = std::move(map); - if (!dex_file->DisableWrite()) { - *error_msg = StringPrintf("Failed to make dex file '%s' read only", location.c_str()); - *error_code = ZipOpenErrorCode::kMakeReadOnlyError; - return nullptr; - } - CHECK(dex_file->IsReadOnly()) << location; - if (verify_result != VerifyResult::kVerifySucceeded) { - *error_code = ZipOpenErrorCode::kVerifyError; - return nullptr; - } - *error_code = ZipOpenErrorCode::kNoError; - return dex_file; -} - -// Technically we do not have a limitation with respect to the number of dex files that can be in a -// multidex APK. However, it's bad practice, as each dex file requires its own tables for symbols -// (types, classes, methods, ...) and dex caches. 
So warn the user that we open a zip with what -// seems an excessive number. -static constexpr size_t kWarnOnManyDexFilesThreshold = 100; - -bool DexFile::OpenAllDexFilesFromZip(const ZipArchive& zip_archive, - const std::string& location, - bool verify_checksum, - std::string* error_msg, - std::vector<std::unique_ptr<const DexFile>>* dex_files) { - ScopedTrace trace("Dex file open from Zip " + std::string(location)); - DCHECK(dex_files != nullptr) << "DexFile::OpenFromZip: out-param is nullptr"; - ZipOpenErrorCode error_code; - std::unique_ptr<const DexFile> dex_file(OpenOneDexFileFromZip(zip_archive, - kClassesDex, - location, - verify_checksum, - error_msg, - &error_code)); - if (dex_file.get() == nullptr) { - return false; - } else { - // Had at least classes.dex. - dex_files->push_back(std::move(dex_file)); - - // Now try some more. - - // We could try to avoid std::string allocations by working on a char array directly. As we - // do not expect a lot of iterations, this seems too involved and brittle. - - for (size_t i = 1; ; ++i) { - std::string name = GetMultiDexClassesDexName(i); - std::string fake_location = GetMultiDexLocation(i, location.c_str()); - std::unique_ptr<const DexFile> next_dex_file(OpenOneDexFileFromZip(zip_archive, - name.c_str(), - fake_location, - verify_checksum, - error_msg, - &error_code)); - if (next_dex_file.get() == nullptr) { - if (error_code != ZipOpenErrorCode::kEntryNotFound) { - LOG(WARNING) << "Zip open failed: " << *error_msg; - } - break; - } else { - dex_files->push_back(std::move(next_dex_file)); - } - - if (i == kWarnOnManyDexFilesThreshold) { - LOG(WARNING) << location << " has in excess of " << kWarnOnManyDexFilesThreshold - << " dex files. 
Please consider coalescing and shrinking the number to " - " avoid runtime overhead."; - } - - if (i == std::numeric_limits<size_t>::max()) { - LOG(ERROR) << "Overflow in number of dex files!"; - break; - } - } - - return true; - } -} - -std::unique_ptr<DexFile> DexFile::OpenCommon(const uint8_t* base, - size_t size, - const std::string& location, - uint32_t location_checksum, - const OatDexFile* oat_dex_file, - bool verify, - bool verify_checksum, - std::string* error_msg, - VerifyResult* verify_result) { - if (verify_result != nullptr) { - *verify_result = VerifyResult::kVerifyNotAttempted; - } - std::unique_ptr<DexFile> dex_file(new DexFile(base, - size, - location, - location_checksum, - oat_dex_file)); - if (dex_file == nullptr) { - *error_msg = StringPrintf("Failed to open dex file '%s' from memory: %s", location.c_str(), - error_msg->c_str()); - return nullptr; - } - if (!dex_file->Init(error_msg)) { - dex_file.reset(); - return nullptr; - } - if (verify && !DexFileVerifier::Verify(dex_file.get(), - dex_file->Begin(), - dex_file->Size(), - location.c_str(), - verify_checksum, - error_msg)) { - if (verify_result != nullptr) { - *verify_result = VerifyResult::kVerifyFailed; - } - return nullptr; - } - if (verify_result != nullptr) { - *verify_result = VerifyResult::kVerifySucceeded; - } - return dex_file; -} - DexFile::DexFile(const uint8_t* base, size_t size, const std::string& location, @@ -569,7 +140,7 @@ bool DexFile::Init(std::string* error_msg) { } bool DexFile::CheckMagicAndVersion(std::string* error_msg) const { - if (!IsMagicValid(header_->magic_)) { + if (!IsMagicValid()) { std::ostringstream oss; oss << "Unrecognized magic number in " << GetLocation() << ":" << " " << header_->magic_[0] @@ -579,7 +150,7 @@ bool DexFile::CheckMagicAndVersion(std::string* error_msg) const { *error_msg = oss.str(); return false; } - if (!IsVersionValid(header_->magic_)) { + if (!IsVersionValid()) { std::ostringstream oss; oss << "Unrecognized version number in " << 
GetLocation() << ":" << " " << header_->magic_[4] @@ -619,22 +190,8 @@ void DexFile::InitializeSectionsFromMapList() { } } -bool DexFile::IsMagicValid(const uint8_t* magic) { - return (memcmp(magic, kDexMagic, sizeof(kDexMagic)) == 0); -} - -bool DexFile::IsVersionValid(const uint8_t* magic) { - const uint8_t* version = &magic[sizeof(kDexMagic)]; - for (uint32_t i = 0; i < kNumDexVersions; i++) { - if (memcmp(version, kDexMagicVersions[i], kDexVersionLen) == 0) { - return true; - } - } - return false; -} - uint32_t DexFile::Header::GetVersion() const { - const char* version = reinterpret_cast<const char*>(&magic_[sizeof(kDexMagic)]); + const char* version = reinterpret_cast<const char*>(&magic_[kDexMagicSize]); return atoi(version); } @@ -1218,41 +775,6 @@ bool DexFile::LineNumForPcCb(void* raw_context, const PositionInfo& entry) { } } -bool DexFile::IsMultiDexLocation(const char* location) { - return strrchr(location, kMultiDexSeparator) != nullptr; -} - -std::string DexFile::GetMultiDexClassesDexName(size_t index) { - if (index == 0) { - return "classes.dex"; - } else { - return StringPrintf("classes%zu.dex", index + 1); - } -} - -std::string DexFile::GetMultiDexLocation(size_t index, const char* dex_location) { - if (index == 0) { - return dex_location; - } else { - return StringPrintf("%s" kMultiDexSeparatorString "classes%zu.dex", dex_location, index + 1); - } -} - -std::string DexFile::GetDexCanonicalLocation(const char* dex_location) { - CHECK_NE(dex_location, static_cast<const char*>(nullptr)); - std::string base_location = GetBaseLocation(dex_location); - const char* suffix = dex_location + base_location.size(); - DCHECK(suffix[0] == 0 || suffix[0] == kMultiDexSeparator); - UniqueCPtr<const char[]> path(realpath(base_location.c_str(), nullptr)); - if (path != nullptr && path.get() != base_location) { - return std::string(path.get()) + suffix; - } else if (suffix[0] == 0) { - return base_location; - } else { - return dex_location; - } -} - // Read a signed 
integer. "zwidth" is the zero-based byte count. int32_t DexFile::ReadSignedInt(const uint8_t* ptr, int zwidth) { int32_t val = 0; diff --git a/runtime/dex_file.h b/runtime/dex_file.h index 9c5fd10a36..5759684c55 100644 --- a/runtime/dex_file.h +++ b/runtime/dex_file.h @@ -39,34 +39,28 @@ class Signature; class StringPiece; class ZipArchive; +// Dex file is the API that exposes native dex files (ordinary dex files) and CompactDex. +// Originally, the dex file format used by ART was mostly the same as APKs. The only change was +// quickened opcodes and layout optimizations. +// Since ART needs to support both native dex files and CompactDex files, the DexFile interface +// provides an abstraction to facilitate this. class DexFile { public: + // Number of bytes in the dex file magic. + static constexpr size_t kDexMagicSize = 4; + static constexpr size_t kDexVersionLen = 4; + // First Dex format version supporting default methods. static const uint32_t kDefaultMethodsVersion = 37; // First Dex format version enforcing class definition ordering rules. static const uint32_t kClassDefinitionOrderEnforcedVersion = 37; - static const uint8_t kDexMagic[]; - static constexpr size_t kNumDexVersions = 4; - static constexpr size_t kDexVersionLen = 4; - static const uint8_t kDexMagicVersions[kNumDexVersions][kDexVersionLen]; - static constexpr size_t kSha1DigestSize = 20; static constexpr uint32_t kDexEndianConstant = 0x12345678; - // name of the DexFile entry within a zip archive - static const char* kClassesDex; - // The value of an invalid index. static const uint16_t kDexNoIndex16 = 0xFFFF; - // The separator character in MultiDex locations. - static constexpr char kMultiDexSeparator = '!'; - - // A string version of the previous. This is a define so that we can merge string literals in the - // preprocessor. - #define kMultiDexSeparatorString "!" - // Raw header_item. 
struct Header { uint8_t magic_[8]; @@ -433,57 +427,6 @@ class DexFile { struct AnnotationValue; - // Returns the checksums of a file for comparison with GetLocationChecksum(). - // For .dex files, this is the single header checksum. - // For zip files, this is the zip entry CRC32 checksum for classes.dex and - // each additional multidex entry classes2.dex, classes3.dex, etc. - // Return true if the checksums could be found, false otherwise. - static bool GetMultiDexChecksums(const char* filename, - std::vector<uint32_t>* checksums, - std::string* error_msg); - - // Check whether a location denotes a multidex dex file. This is a very simple check: returns - // whether the string contains the separator character. - static bool IsMultiDexLocation(const char* location); - - // Opens .dex file, backed by existing memory - static std::unique_ptr<const DexFile> Open(const uint8_t* base, - size_t size, - const std::string& location, - uint32_t location_checksum, - const OatDexFile* oat_dex_file, - bool verify, - bool verify_checksum, - std::string* error_msg); - - // Opens .dex file that has been memory-mapped by the caller. - static std::unique_ptr<const DexFile> Open(const std::string& location, - uint32_t location_checkum, - std::unique_ptr<MemMap> mem_map, - bool verify, - bool verify_checksum, - std::string* error_msg); - - // Opens all .dex files found in the file, guessing the container format based on file extension. - static bool Open(const char* filename, - const std::string& location, - bool verify_checksum, - std::string* error_msg, - std::vector<std::unique_ptr<const DexFile>>* dex_files); - - // Open a single dex file from an fd. This function closes the fd. 
- static std::unique_ptr<const DexFile> OpenDex(int fd, - const std::string& location, - bool verify_checksum, - std::string* error_msg); - - // Opens dex files from within a .jar, .zip, or .apk file - static bool OpenZip(int fd, - const std::string& location, - bool verify_checksum, - std::string* error_msg, - std::vector<std::unique_ptr<const DexFile>>* dex_files); - // Closes a .dex file. virtual ~DexFile(); @@ -491,38 +434,6 @@ class DexFile { return location_; } - // For normal dex files, location and base location coincide. If a dex file is part of a multidex - // archive, the base location is the name of the originating jar/apk, stripped of any internal - // classes*.dex path. - static std::string GetBaseLocation(const char* location) { - const char* pos = strrchr(location, kMultiDexSeparator); - if (pos == nullptr) { - return location; - } else { - return std::string(location, pos - location); - } - } - - static std::string GetBaseLocation(const std::string& location) { - return GetBaseLocation(location.c_str()); - } - - // Returns the '!classes*.dex' part of the dex location. Returns an empty - // string if there is no multidex suffix for the given location. - // The kMultiDexSeparator is included in the returned suffix. - static std::string GetMultiDexSuffix(const std::string& location) { - size_t pos = location.rfind(kMultiDexSeparator); - if (pos == std::string::npos) { - return ""; - } else { - return location.substr(pos); - } - } - - std::string GetBaseLocation() const { - return GetBaseLocation(location_); - } - // For DexFiles directly from .dex files, this is the checksum from the DexFile::Header. // For DexFiles opened from a zip files, this will be the ZipEntry CRC32 of classes.dex. uint32_t GetLocationChecksum() const { @@ -540,10 +451,10 @@ class DexFile { } // Returns true if the byte string points to the magic value. 
- static bool IsMagicValid(const uint8_t* magic); + virtual bool IsMagicValid() const = 0; // Returns true if the byte string after the magic is the correct value. - static bool IsVersionValid(const uint8_t* magic); + virtual bool IsVersionValid() const = 0; // Returns the number of string identifiers in the .dex file. size_t NumStringIds() const { @@ -733,11 +644,10 @@ class DexFile { const TypeList* GetInterfacesList(const ClassDef& class_def) const { if (class_def.interfaces_off_ == 0) { - return nullptr; - } else { - const uint8_t* addr = begin_ + class_def.interfaces_off_; - return reinterpret_cast<const TypeList*>(addr); + return nullptr; } + const uint8_t* addr = begin_ + class_def.interfaces_off_; + return reinterpret_cast<const TypeList*>(addr); } uint32_t NumMethodHandles() const { @@ -760,11 +670,7 @@ class DexFile { // Returns a pointer to the raw memory mapped class_data_item const uint8_t* GetClassData(const ClassDef& class_def) const { - if (class_def.class_data_off_ == 0) { - return nullptr; - } else { - return begin_ + class_def.class_data_off_; - } + return (class_def.class_data_off_ == 0) ? 
nullptr : begin_ + class_def.class_data_off_; } // @@ -772,10 +678,9 @@ class DexFile { DCHECK_LT(code_off, size_) << "Code item offset larger then maximum allowed offset"; if (code_off == 0) { return nullptr; // native or abstract method - } else { - const uint8_t* addr = begin_ + code_off; - return reinterpret_cast<const CodeItem*>(addr); } + const uint8_t* addr = begin_ + code_off; + return reinterpret_cast<const CodeItem*>(addr); } const char* GetReturnTypeDescriptor(const ProtoId& proto_id) const; @@ -820,20 +725,13 @@ class DexFile { const char* GetShorty(uint32_t proto_idx) const; const TypeList* GetProtoParameters(const ProtoId& proto_id) const { - if (proto_id.parameters_off_ == 0) { - return nullptr; - } else { - const uint8_t* addr = begin_ + proto_id.parameters_off_; - return reinterpret_cast<const TypeList*>(addr); - } + return (proto_id.parameters_off_ == 0) + ? nullptr + : reinterpret_cast<const TypeList*>(begin_ + proto_id.parameters_off_); } const uint8_t* GetEncodedStaticFieldValuesArray(const ClassDef& class_def) const { - if (class_def.static_values_off_ == 0) { - return 0; - } else { - return begin_ + class_def.static_values_off_; - } + return (class_def.static_values_off_ == 0) ? 0 : begin_ + class_def.static_values_off_; } const uint8_t* GetCallSiteEncodedValuesArray(const CallSiteIdItem& call_site_id) const { @@ -860,27 +758,18 @@ class DexFile { // Check that the offset is in bounds. // Note that although the specification says that 0 should be used if there // is no debug information, some applications incorrectly use 0xFFFFFFFF. - if (code_item->debug_info_off_ == 0 || code_item->debug_info_off_ >= size_) { - return nullptr; - } else { - return begin_ + code_item->debug_info_off_; - } + const uint32_t debug_info_off = code_item->debug_info_off_; + return (debug_info_off == 0 || debug_info_off >= size_) ? 
nullptr : begin_ + debug_info_off; } struct PositionInfo { - PositionInfo() - : address_(0), - line_(0), - source_file_(nullptr), - prologue_end_(false), - epilogue_begin_(false) { - } + PositionInfo() = default; - uint32_t address_; // In 16-bit code units. - uint32_t line_; // Source code line number starting at 1. - const char* source_file_; // nullptr if the file from ClassDef still applies. - bool prologue_end_; - bool epilogue_begin_; + uint32_t address_ = 0; // In 16-bit code units. + uint32_t line_ = 0; // Source code line number starting at 1. + const char* source_file_ = nullptr; // nullptr if the file from ClassDef still applies. + bool prologue_end_ = false; + bool epilogue_begin_ = false; }; // Callback for "new position table entry". @@ -888,23 +777,15 @@ class DexFile { typedef bool (*DexDebugNewPositionCb)(void* context, const PositionInfo& entry); struct LocalInfo { - LocalInfo() - : name_(nullptr), - descriptor_(nullptr), - signature_(nullptr), - start_address_(0), - end_address_(0), - reg_(0), - is_live_(false) { - } - - const char* name_; // E.g., list. It can be nullptr if unknown. - const char* descriptor_; // E.g., Ljava/util/LinkedList; - const char* signature_; // E.g., java.util.LinkedList<java.lang.Integer> - uint32_t start_address_; // PC location where the local is first defined. - uint32_t end_address_; // PC location where the local is no longer defined. - uint16_t reg_; // Dex register which stores the values. - bool is_live_; // Is the local defined and live. + LocalInfo() = default; + + const char* name_ = nullptr; // E.g., list. It can be nullptr if unknown. + const char* descriptor_ = nullptr; // E.g., Ljava/util/LinkedList; + const char* signature_ = nullptr; // E.g., java.util.LinkedList<java.lang.Integer> + uint32_t start_address_ = 0; // PC location where the local is first defined. + uint32_t end_address_ = 0; // PC location where the local is no longer defined. + uint16_t reg_ = 0; // Dex register which stores the values. 
+ bool is_live_ = false; // Is the local defined and live. }; // Callback for "new locals table entry". @@ -913,98 +794,82 @@ class DexFile { static bool LineNumForPcCb(void* context, const PositionInfo& entry); const AnnotationsDirectoryItem* GetAnnotationsDirectory(const ClassDef& class_def) const { - if (class_def.annotations_off_ == 0) { - return nullptr; - } else { - return reinterpret_cast<const AnnotationsDirectoryItem*>(begin_ + class_def.annotations_off_); - } + return (class_def.annotations_off_ == 0) + ? nullptr + : reinterpret_cast<const AnnotationsDirectoryItem*>(begin_ + class_def.annotations_off_); } const AnnotationSetItem* GetClassAnnotationSet(const AnnotationsDirectoryItem* anno_dir) const { - if (anno_dir->class_annotations_off_ == 0) { - return nullptr; - } else { - return reinterpret_cast<const AnnotationSetItem*>(begin_ + anno_dir->class_annotations_off_); - } + return (anno_dir->class_annotations_off_ == 0) + ? nullptr + : reinterpret_cast<const AnnotationSetItem*>(begin_ + anno_dir->class_annotations_off_); } const FieldAnnotationsItem* GetFieldAnnotations(const AnnotationsDirectoryItem* anno_dir) const { - if (anno_dir->fields_size_ == 0) { - return nullptr; - } else { - return reinterpret_cast<const FieldAnnotationsItem*>(&anno_dir[1]); - } + return (anno_dir->fields_size_ == 0) + ? nullptr + : reinterpret_cast<const FieldAnnotationsItem*>(&anno_dir[1]); } const MethodAnnotationsItem* GetMethodAnnotations(const AnnotationsDirectoryItem* anno_dir) const { if (anno_dir->methods_size_ == 0) { return nullptr; - } else { - // Skip past the header and field annotations. - const uint8_t* addr = reinterpret_cast<const uint8_t*>(&anno_dir[1]); - addr += anno_dir->fields_size_ * sizeof(FieldAnnotationsItem); - return reinterpret_cast<const MethodAnnotationsItem*>(addr); } + // Skip past the header and field annotations. 
+ const uint8_t* addr = reinterpret_cast<const uint8_t*>(&anno_dir[1]); + addr += anno_dir->fields_size_ * sizeof(FieldAnnotationsItem); + return reinterpret_cast<const MethodAnnotationsItem*>(addr); } const ParameterAnnotationsItem* GetParameterAnnotations(const AnnotationsDirectoryItem* anno_dir) const { if (anno_dir->parameters_size_ == 0) { return nullptr; - } else { - // Skip past the header, field annotations, and method annotations. - const uint8_t* addr = reinterpret_cast<const uint8_t*>(&anno_dir[1]); - addr += anno_dir->fields_size_ * sizeof(FieldAnnotationsItem); - addr += anno_dir->methods_size_ * sizeof(MethodAnnotationsItem); - return reinterpret_cast<const ParameterAnnotationsItem*>(addr); } + // Skip past the header, field annotations, and method annotations. + const uint8_t* addr = reinterpret_cast<const uint8_t*>(&anno_dir[1]); + addr += anno_dir->fields_size_ * sizeof(FieldAnnotationsItem); + addr += anno_dir->methods_size_ * sizeof(MethodAnnotationsItem); + return reinterpret_cast<const ParameterAnnotationsItem*>(addr); } const AnnotationSetItem* GetFieldAnnotationSetItem(const FieldAnnotationsItem& anno_item) const { uint32_t offset = anno_item.annotations_off_; - if (offset == 0) { - return nullptr; - } else { - return reinterpret_cast<const AnnotationSetItem*>(begin_ + offset); - } + return (offset == 0) + ? nullptr + : reinterpret_cast<const AnnotationSetItem*>(begin_ + offset); } const AnnotationSetItem* GetMethodAnnotationSetItem(const MethodAnnotationsItem& anno_item) const { uint32_t offset = anno_item.annotations_off_; - if (offset == 0) { - return nullptr; - } else { - return reinterpret_cast<const AnnotationSetItem*>(begin_ + offset); - } + return (offset == 0) + ? 
nullptr + : reinterpret_cast<const AnnotationSetItem*>(begin_ + offset); } const AnnotationSetRefList* GetParameterAnnotationSetRefList( const ParameterAnnotationsItem* anno_item) const { uint32_t offset = anno_item->annotations_off_; - if (offset == 0) { - return nullptr; - } - return reinterpret_cast<const AnnotationSetRefList*>(begin_ + offset); + return (offset == 0) + ? nullptr + : reinterpret_cast<const AnnotationSetRefList*>(begin_ + offset); } const AnnotationItem* GetAnnotationItem(const AnnotationSetItem* set_item, uint32_t index) const { DCHECK_LE(index, set_item->size_); uint32_t offset = set_item->entries_[index]; - if (offset == 0) { - return nullptr; - } else { - return reinterpret_cast<const AnnotationItem*>(begin_ + offset); - } + return (offset == 0) + ? nullptr + : reinterpret_cast<const AnnotationItem*>(begin_ + offset); } const AnnotationSetItem* GetSetRefItemItem(const AnnotationSetRefItem* anno_item) const { uint32_t offset = anno_item->annotations_off_; - if (offset == 0) { - return nullptr; - } - return reinterpret_cast<const AnnotationSetItem*>(begin_ + offset); + return (offset == 0) + ? nullptr + : reinterpret_cast<const AnnotationSetItem*>(begin_ + offset); } // Debug info opcodes and constants @@ -1065,29 +930,6 @@ class DexFile { return size_; } - // Return the name of the index-th classes.dex in a multidex zip file. This is classes.dex for - // index == 0, and classes{index + 1}.dex else. - static std::string GetMultiDexClassesDexName(size_t index); - - // Return the (possibly synthetic) dex location for a multidex entry. This is dex_location for - // index == 0, and dex_location + multi-dex-separator + GetMultiDexClassesDexName(index) else. - static std::string GetMultiDexLocation(size_t index, const char* dex_location); - - // Returns the canonical form of the given dex location. 
- // - // There are different flavors of "dex locations" as follows: - // the file name of a dex file: - // The actual file path that the dex file has on disk. - // dex_location: - // This acts as a key for the class linker to know which dex file to load. - // It may correspond to either an old odex file or a particular dex file - // inside an oat file. In the first case it will also match the file name - // of the dex file. In the second case (oat) it will include the file name - // and possibly some multidex annotation to uniquely identify it. - // canonical_dex_location: - // the dex_location where it's file name part has been made canonical. - static std::string GetDexCanonicalLocation(const char* dex_location); - const OatDexFile* GetOatDexFile() const { return oat_dex_file_; } @@ -1113,64 +955,7 @@ class DexFile { // Returns a human-readable form of the type at an index. std::string PrettyType(dex::TypeIndex type_idx) const; - private: - static std::unique_ptr<const DexFile> OpenFile(int fd, - const std::string& location, - bool verify, - bool verify_checksum, - std::string* error_msg); - - enum class ZipOpenErrorCode { // private - kNoError, - kEntryNotFound, - kExtractToMemoryError, - kDexFileError, - kMakeReadOnlyError, - kVerifyError - }; - - // Open all classesXXX.dex files from a zip archive. - static bool OpenAllDexFilesFromZip(const ZipArchive& zip_archive, - const std::string& location, - bool verify_checksum, - std::string* error_msg, - std::vector<std::unique_ptr<const DexFile>>* dex_files); - - // Opens .dex file from the entry_name in a zip archive. error_code is undefined when non-null - // return. 
- static std::unique_ptr<const DexFile> OpenOneDexFileFromZip(const ZipArchive& zip_archive, - const char* entry_name, - const std::string& location, - bool verify_checksum, - std::string* error_msg, - ZipOpenErrorCode* error_code); - - enum class VerifyResult { // private - kVerifyNotAttempted, - kVerifySucceeded, - kVerifyFailed - }; - - static std::unique_ptr<DexFile> OpenCommon(const uint8_t* base, - size_t size, - const std::string& location, - uint32_t location_checksum, - const OatDexFile* oat_dex_file, - bool verify, - bool verify_checksum, - std::string* error_msg, - VerifyResult* verify_result = nullptr); - - - // Opens a .dex file at the given address, optionally backed by a MemMap - static std::unique_ptr<const DexFile> OpenMemory(const uint8_t* dex_file, - size_t size, - const std::string& location, - uint32_t location_checksum, - std::unique_ptr<MemMap> mem_map, - const OatDexFile* oat_dex_file, - std::string* error_msg); - + protected: DexFile(const uint8_t* base, size_t size, const std::string& location, @@ -1241,9 +1026,9 @@ class DexFile { // null. 
mutable const OatDexFile* oat_dex_file_; + friend class DexFileLoader; friend class DexFileVerifierTest; friend class OatWriter; - ART_FRIEND_TEST(ClassLinkerTest, RegisterDexFileName); // for constructor }; std::ostream& operator<<(std::ostream& os, const DexFile& dex_file); @@ -1252,7 +1037,7 @@ std::ostream& operator<<(std::ostream& os, const DexFile& dex_file); class DexFileParameterIterator { public: DexFileParameterIterator(const DexFile& dex_file, const DexFile::ProtoId& proto_id) - : dex_file_(dex_file), size_(0), pos_(0) { + : dex_file_(dex_file) { type_list_ = dex_file_.GetProtoParameters(proto_id); if (type_list_ != nullptr) { size_ = type_list_->Size(); @@ -1269,9 +1054,9 @@ class DexFileParameterIterator { } private: const DexFile& dex_file_; - const DexFile::TypeList* type_list_; - uint32_t size_; - uint32_t pos_; + const DexFile::TypeList* type_list_ = nullptr; + uint32_t size_ = 0; + uint32_t pos_ = 0; DISALLOW_IMPLICIT_CONSTRUCTORS(DexFileParameterIterator); }; @@ -1298,13 +1083,12 @@ class Signature : public ValueObject { Signature(const DexFile* dex, const DexFile::ProtoId& proto) : dex_file_(dex), proto_id_(&proto) { } - Signature() : dex_file_(nullptr), proto_id_(nullptr) { - } + Signature() = default; friend class DexFile; - const DexFile* const dex_file_; - const DexFile::ProtoId* const proto_id_; + const DexFile* const dex_file_ = nullptr; + const DexFile::ProtoId* const proto_id_ = nullptr; }; std::ostream& operator<<(std::ostream& os, const Signature& sig); @@ -1583,44 +1367,44 @@ class CallSiteArrayValueIterator : public EncodedArrayValueIterator { std::ostream& operator<<(std::ostream& os, const CallSiteArrayValueIterator::ValueType& code); class CatchHandlerIterator { - public: - CatchHandlerIterator(const DexFile::CodeItem& code_item, uint32_t address); + public: + CatchHandlerIterator(const DexFile::CodeItem& code_item, uint32_t address); - CatchHandlerIterator(const DexFile::CodeItem& code_item, - const DexFile::TryItem& try_item); + 
CatchHandlerIterator(const DexFile::CodeItem& code_item, + const DexFile::TryItem& try_item); - explicit CatchHandlerIterator(const uint8_t* handler_data) { - Init(handler_data); - } + explicit CatchHandlerIterator(const uint8_t* handler_data) { + Init(handler_data); + } - dex::TypeIndex GetHandlerTypeIndex() const { - return handler_.type_idx_; - } - uint32_t GetHandlerAddress() const { - return handler_.address_; - } - void Next(); - bool HasNext() const { - return remaining_count_ != -1 || catch_all_; - } - // End of this set of catch blocks, convenience method to locate next set of catch blocks - const uint8_t* EndDataPointer() const { - CHECK(!HasNext()); - return current_data_; - } + dex::TypeIndex GetHandlerTypeIndex() const { + return handler_.type_idx_; + } + uint32_t GetHandlerAddress() const { + return handler_.address_; + } + void Next(); + bool HasNext() const { + return remaining_count_ != -1 || catch_all_; + } + // End of this set of catch blocks, convenience method to locate next set of catch blocks + const uint8_t* EndDataPointer() const { + CHECK(!HasNext()); + return current_data_; + } - private: - void Init(const DexFile::CodeItem& code_item, int32_t offset); - void Init(const uint8_t* handler_data); - - struct CatchHandlerItem { - dex::TypeIndex type_idx_; // type index of the caught exception type - uint32_t address_; // handler address - } handler_; - const uint8_t* current_data_; // the current handler in dex file. - int32_t remaining_count_; // number of handlers not read. - bool catch_all_; // is there a handler that will catch all exceptions in case - // that all typed handler does not match. + private: + void Init(const DexFile::CodeItem& code_item, int32_t offset); + void Init(const uint8_t* handler_data); + + struct CatchHandlerItem { + dex::TypeIndex type_idx_; // type index of the caught exception type + uint32_t address_; // handler address + } handler_; + const uint8_t* current_data_; // the current handler in dex file. 
+ int32_t remaining_count_; // number of handlers not read. + bool catch_all_; // is there a handler that will catch all exceptions in case + // that all typed handler does not match. }; } // namespace art diff --git a/runtime/dex_file_loader.cc b/runtime/dex_file_loader.cc new file mode 100644 index 0000000000..3ccb755f58 --- /dev/null +++ b/runtime/dex_file_loader.cc @@ -0,0 +1,484 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "dex_file_loader.h" + +#include <sys/mman.h> // For the PROT_* and MAP_* constants. 
+#include <sys/stat.h> + +#include "android-base/stringprintf.h" + +#include "base/file_magic.h" +#include "base/stl_util.h" +#include "base/systrace.h" +#include "base/unix_file/fd_file.h" +#include "dex_file.h" +#include "dex_file_verifier.h" +#include "native_dex_file.h" +#include "zip_archive.h" + +namespace art { + +using android::base::StringPrintf; + +static constexpr OatDexFile* kNoOatDexFile = nullptr; + + +bool DexFileLoader::IsValidMagic(uint32_t magic) { + return IsValidMagic(reinterpret_cast<uint8_t*>(&magic)); +} + +bool DexFileLoader::IsValidMagic(const uint8_t* magic) { + return NativeDexFile::IsMagicValid(magic); +} + +bool DexFileLoader::GetMultiDexChecksums(const char* filename, + std::vector<uint32_t>* checksums, + std::string* error_msg) { + CHECK(checksums != nullptr); + uint32_t magic; + + File fd = OpenAndReadMagic(filename, &magic, error_msg); + if (fd.Fd() == -1) { + DCHECK(!error_msg->empty()); + return false; + } + if (IsZipMagic(magic)) { + std::unique_ptr<ZipArchive> zip_archive( + ZipArchive::OpenFromFd(fd.Release(), filename, error_msg)); + if (zip_archive.get() == nullptr) { + *error_msg = StringPrintf("Failed to open zip archive '%s' (error msg: %s)", filename, + error_msg->c_str()); + return false; + } + + uint32_t i = 0; + std::string zip_entry_name = GetMultiDexClassesDexName(i++); + std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(zip_entry_name.c_str(), error_msg)); + if (zip_entry.get() == nullptr) { + *error_msg = StringPrintf("Zip archive '%s' doesn't contain %s (error msg: %s)", filename, + zip_entry_name.c_str(), error_msg->c_str()); + return false; + } + + do { + checksums->push_back(zip_entry->GetCrc32()); + zip_entry_name = GetMultiDexClassesDexName(i++); + zip_entry.reset(zip_archive->Find(zip_entry_name.c_str(), error_msg)); + } while (zip_entry.get() != nullptr); + return true; + } + if (IsValidMagic(magic)) { + std::unique_ptr<const DexFile> dex_file( + OpenFile(fd.Release(), filename, false, false, 
error_msg)); + if (dex_file == nullptr) { + return false; + } + checksums->push_back(dex_file->GetHeader().checksum_); + return true; + } + *error_msg = StringPrintf("Expected valid zip or dex file: '%s'", filename); + return false; +} + +bool DexFileLoader::IsMultiDexLocation(const char* location) { + return strrchr(location, kMultiDexSeparator) != nullptr; +} + +std::string DexFileLoader::GetMultiDexClassesDexName(size_t index) { + return (index == 0) ? "classes.dex" : StringPrintf("classes%zu.dex", index + 1); +} + +std::string DexFileLoader::GetMultiDexLocation(size_t index, const char* dex_location) { + return (index == 0) + ? dex_location + : StringPrintf("%s%cclasses%zu.dex", dex_location, kMultiDexSeparator, index + 1); +} + +std::string DexFileLoader::GetDexCanonicalLocation(const char* dex_location) { + CHECK_NE(dex_location, static_cast<const char*>(nullptr)); + std::string base_location = GetBaseLocation(dex_location); + const char* suffix = dex_location + base_location.size(); + DCHECK(suffix[0] == 0 || suffix[0] == kMultiDexSeparator); + UniqueCPtr<const char[]> path(realpath(base_location.c_str(), nullptr)); + if (path != nullptr && path.get() != base_location) { + return std::string(path.get()) + suffix; + } else if (suffix[0] == 0) { + return base_location; + } else { + return dex_location; + } +} + +std::unique_ptr<const DexFile> DexFileLoader::Open(const uint8_t* base, + size_t size, + const std::string& location, + uint32_t location_checksum, + const OatDexFile* oat_dex_file, + bool verify, + bool verify_checksum, + std::string* error_msg) { + ScopedTrace trace(std::string("Open dex file from RAM ") + location); + return OpenCommon(base, + size, + location, + location_checksum, + oat_dex_file, + verify, + verify_checksum, + error_msg); +} + +std::unique_ptr<const DexFile> DexFileLoader::Open(const std::string& location, + uint32_t location_checksum, + std::unique_ptr<MemMap> map, + bool verify, + bool verify_checksum, + std::string* error_msg) { 
+ ScopedTrace trace(std::string("Open dex file from mapped-memory ") + location); + CHECK(map.get() != nullptr); + + if (map->Size() < sizeof(DexFile::Header)) { + *error_msg = StringPrintf( + "DexFile: failed to open dex file '%s' that is too short to have a header", + location.c_str()); + return nullptr; + } + + std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(), + map->Size(), + location, + location_checksum, + kNoOatDexFile, + verify, + verify_checksum, + error_msg); + if (dex_file != nullptr) { + dex_file->mem_map_ = std::move(map); + } + return dex_file; +} + +bool DexFileLoader::Open(const char* filename, + const std::string& location, + bool verify_checksum, + std::string* error_msg, + std::vector<std::unique_ptr<const DexFile>>* dex_files) { + ScopedTrace trace(std::string("Open dex file ") + std::string(location)); + DCHECK(dex_files != nullptr) << "DexFile::Open: out-param is nullptr"; + uint32_t magic; + File fd = OpenAndReadMagic(filename, &magic, error_msg); + if (fd.Fd() == -1) { + DCHECK(!error_msg->empty()); + return false; + } + if (IsZipMagic(magic)) { + return OpenZip(fd.Release(), location, verify_checksum, error_msg, dex_files); + } + if (IsValidMagic(magic)) { + std::unique_ptr<const DexFile> dex_file(OpenFile(fd.Release(), + location, + /* verify */ true, + verify_checksum, + error_msg)); + if (dex_file.get() != nullptr) { + dex_files->push_back(std::move(dex_file)); + return true; + } else { + return false; + } + } + *error_msg = StringPrintf("Expected valid zip or dex file: '%s'", filename); + return false; +} + +std::unique_ptr<const DexFile> DexFileLoader::OpenDex(int fd, + const std::string& location, + bool verify_checksum, + std::string* error_msg) { + ScopedTrace trace("Open dex file " + std::string(location)); + return OpenFile(fd, location, true /* verify */, verify_checksum, error_msg); +} + +bool DexFileLoader::OpenZip(int fd, + const std::string& location, + bool verify_checksum, + std::string* error_msg, + 
std::vector<std::unique_ptr<const DexFile>>* dex_files) { + ScopedTrace trace("Dex file open Zip " + std::string(location)); + DCHECK(dex_files != nullptr) << "DexFile::OpenZip: out-param is nullptr"; + std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd, location.c_str(), error_msg)); + if (zip_archive.get() == nullptr) { + DCHECK(!error_msg->empty()); + return false; + } + return OpenAllDexFilesFromZip(*zip_archive, location, verify_checksum, error_msg, dex_files); +} + +std::unique_ptr<const DexFile> DexFileLoader::OpenFile(int fd, + const std::string& location, + bool verify, + bool verify_checksum, + std::string* error_msg) { + ScopedTrace trace(std::string("Open dex file ") + std::string(location)); + CHECK(!location.empty()); + std::unique_ptr<MemMap> map; + { + File delayed_close(fd, /* check_usage */ false); + struct stat sbuf; + memset(&sbuf, 0, sizeof(sbuf)); + if (fstat(fd, &sbuf) == -1) { + *error_msg = StringPrintf("DexFile: fstat '%s' failed: %s", location.c_str(), + strerror(errno)); + return nullptr; + } + if (S_ISDIR(sbuf.st_mode)) { + *error_msg = StringPrintf("Attempt to mmap directory '%s'", location.c_str()); + return nullptr; + } + size_t length = sbuf.st_size; + map.reset(MemMap::MapFile(length, + PROT_READ, + MAP_PRIVATE, + fd, + 0, + /*low_4gb*/false, + location.c_str(), + error_msg)); + if (map == nullptr) { + DCHECK(!error_msg->empty()); + return nullptr; + } + } + + if (map->Size() < sizeof(DexFile::Header)) { + *error_msg = StringPrintf( + "DexFile: failed to open dex file '%s' that is too short to have a header", + location.c_str()); + return nullptr; + } + + const DexFile::Header* dex_header = reinterpret_cast<const DexFile::Header*>(map->Begin()); + + std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(), + map->Size(), + location, + dex_header->checksum_, + kNoOatDexFile, + verify, + verify_checksum, + error_msg); + if (dex_file != nullptr) { + dex_file->mem_map_ = std::move(map); + } + + return dex_file; +} + 
+std::unique_ptr<const DexFile> DexFileLoader::OpenOneDexFileFromZip( + const ZipArchive& zip_archive, + const char* entry_name, + const std::string& location, + bool verify_checksum, + std::string* error_msg, + ZipOpenErrorCode* error_code) { + ScopedTrace trace("Dex file open from Zip Archive " + std::string(location)); + CHECK(!location.empty()); + std::unique_ptr<ZipEntry> zip_entry(zip_archive.Find(entry_name, error_msg)); + if (zip_entry == nullptr) { + *error_code = ZipOpenErrorCode::kEntryNotFound; + return nullptr; + } + if (zip_entry->GetUncompressedLength() == 0) { + *error_msg = StringPrintf("Dex file '%s' has zero length", location.c_str()); + *error_code = ZipOpenErrorCode::kDexFileError; + return nullptr; + } + + std::unique_ptr<MemMap> map; + if (zip_entry->IsUncompressed()) { + if (!zip_entry->IsAlignedTo(alignof(DexFile::Header))) { + // Do not mmap unaligned ZIP entries because + // doing so would fail dex verification which requires 4 byte alignment. + LOG(WARNING) << "Can't mmap dex file " << location << "!" << entry_name << " directly; " + << "please zipalign to " << alignof(DexFile::Header) << " bytes. " + << "Falling back to extracting file."; + } else { + // Map uncompressed files within zip as file-backed to avoid a dirty copy. + map.reset(zip_entry->MapDirectlyFromFile(location.c_str(), /*out*/error_msg)); + if (map == nullptr) { + LOG(WARNING) << "Can't mmap dex file " << location << "!" << entry_name << " directly; " + << "is your ZIP file corrupted? Falling back to extraction."; + // Try again with Extraction which still has a chance of recovery. + } + } + } + + if (map == nullptr) { + // Default path for compressed ZIP entries, + // and fallback for stored ZIP entries. 
+ map.reset(zip_entry->ExtractToMemMap(location.c_str(), entry_name, error_msg)); + } + + if (map == nullptr) { + *error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", entry_name, location.c_str(), + error_msg->c_str()); + *error_code = ZipOpenErrorCode::kExtractToMemoryError; + return nullptr; + } + VerifyResult verify_result; + std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(), + map->Size(), + location, + zip_entry->GetCrc32(), + kNoOatDexFile, + /* verify */ true, + verify_checksum, + error_msg, + &verify_result); + if (dex_file == nullptr) { + if (verify_result == VerifyResult::kVerifyNotAttempted) { + *error_code = ZipOpenErrorCode::kDexFileError; + } else { + *error_code = ZipOpenErrorCode::kVerifyError; + } + return nullptr; + } + dex_file->mem_map_ = std::move(map); + if (!dex_file->DisableWrite()) { + *error_msg = StringPrintf("Failed to make dex file '%s' read only", location.c_str()); + *error_code = ZipOpenErrorCode::kMakeReadOnlyError; + return nullptr; + } + CHECK(dex_file->IsReadOnly()) << location; + if (verify_result != VerifyResult::kVerifySucceeded) { + *error_code = ZipOpenErrorCode::kVerifyError; + return nullptr; + } + *error_code = ZipOpenErrorCode::kNoError; + return dex_file; +} + +// Technically we do not have a limitation with respect to the number of dex files that can be in a +// multidex APK. However, it's bad practice, as each dex file requires its own tables for symbols +// (types, classes, methods, ...) and dex caches. So warn the user that we open a zip with what +// seems an excessive number. 
+static constexpr size_t kWarnOnManyDexFilesThreshold = 100; + +bool DexFileLoader::OpenAllDexFilesFromZip(const ZipArchive& zip_archive, + const std::string& location, + bool verify_checksum, + std::string* error_msg, + std::vector<std::unique_ptr<const DexFile>>* dex_files) { + ScopedTrace trace("Dex file open from Zip " + std::string(location)); + DCHECK(dex_files != nullptr) << "DexFile::OpenFromZip: out-param is nullptr"; + ZipOpenErrorCode error_code; + std::unique_ptr<const DexFile> dex_file(OpenOneDexFileFromZip(zip_archive, + kClassesDex, + location, + verify_checksum, + error_msg, + &error_code)); + if (dex_file.get() == nullptr) { + return false; + } else { + // Had at least classes.dex. + dex_files->push_back(std::move(dex_file)); + + // Now try some more. + + // We could try to avoid std::string allocations by working on a char array directly. As we + // do not expect a lot of iterations, this seems too involved and brittle. + + for (size_t i = 1; ; ++i) { + std::string name = GetMultiDexClassesDexName(i); + std::string fake_location = GetMultiDexLocation(i, location.c_str()); + std::unique_ptr<const DexFile> next_dex_file(OpenOneDexFileFromZip(zip_archive, + name.c_str(), + fake_location, + verify_checksum, + error_msg, + &error_code)); + if (next_dex_file.get() == nullptr) { + if (error_code != ZipOpenErrorCode::kEntryNotFound) { + LOG(WARNING) << "Zip open failed: " << *error_msg; + } + break; + } else { + dex_files->push_back(std::move(next_dex_file)); + } + + if (i == kWarnOnManyDexFilesThreshold) { + LOG(WARNING) << location << " has in excess of " << kWarnOnManyDexFilesThreshold + << " dex files. 
Please consider coalescing and shrinking the number to " + " avoid runtime overhead."; + } + + if (i == std::numeric_limits<size_t>::max()) { + LOG(ERROR) << "Overflow in number of dex files!"; + break; + } + } + + return true; + } +} + +std::unique_ptr<DexFile> DexFileLoader::OpenCommon(const uint8_t* base, + size_t size, + const std::string& location, + uint32_t location_checksum, + const OatDexFile* oat_dex_file, + bool verify, + bool verify_checksum, + std::string* error_msg, + VerifyResult* verify_result) { + if (verify_result != nullptr) { + *verify_result = VerifyResult::kVerifyNotAttempted; + } + std::unique_ptr<DexFile> dex_file; + if (NativeDexFile::IsMagicValid(base)) { + dex_file.reset(new NativeDexFile(base, size, location, location_checksum, oat_dex_file)); + } else { + return nullptr; + } + if (dex_file == nullptr) { + *error_msg = StringPrintf("Failed to open dex file '%s' from memory: %s", location.c_str(), + error_msg->c_str()); + return nullptr; + } + if (!dex_file->Init(error_msg)) { + dex_file.reset(); + return nullptr; + } + if (verify && !DexFileVerifier::Verify(dex_file.get(), + dex_file->Begin(), + dex_file->Size(), + location.c_str(), + verify_checksum, + error_msg)) { + if (verify_result != nullptr) { + *verify_result = VerifyResult::kVerifyFailed; + } + return nullptr; + } + if (verify_result != nullptr) { + *verify_result = VerifyResult::kVerifySucceeded; + } + return dex_file; +} + +} // namespace art diff --git a/runtime/dex_file_loader.h b/runtime/dex_file_loader.h new file mode 100644 index 0000000000..61b5c71726 --- /dev/null +++ b/runtime/dex_file_loader.h @@ -0,0 +1,200 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_DEX_FILE_LOADER_H_
#define ART_RUNTIME_DEX_FILE_LOADER_H_

#include <cstdint>
#include <cstring>  // strrchr() is used by GetBaseLocation(); was missing.
#include <memory>
#include <string>
#include <vector>

namespace art {

class DexFile;
class MemMap;
class OatDexFile;
class ZipArchive;

// Class that is used to open dex files and deal with corresponding multidex and location logic.
class DexFileLoader {
 public:
  // name of the DexFile entry within a zip archive
  static constexpr const char* kClassesDex = "classes.dex";

  // The separator character in MultiDex locations.
  static constexpr char kMultiDexSeparator = '!';

  // Return true if the magic is valid for dex or cdex.
  static bool IsValidMagic(uint32_t magic);
  static bool IsValidMagic(const uint8_t* magic);

  // Returns the checksums of a file for comparison with GetLocationChecksum().
  // For .dex files, this is the single header checksum.
  // For zip files, this is the zip entry CRC32 checksum for classes.dex and
  // each additional multidex entry classes2.dex, classes3.dex, etc.
  // Return true if the checksums could be found, false otherwise.
  static bool GetMultiDexChecksums(const char* filename,
                                   std::vector<uint32_t>* checksums,
                                   std::string* error_msg);

  // Check whether a location denotes a multidex dex file. This is a very simple check: returns
  // whether the string contains the separator character.
  static bool IsMultiDexLocation(const char* location);

  // Opens .dex file, backed by existing memory
  static std::unique_ptr<const DexFile> Open(const uint8_t* base,
                                             size_t size,
                                             const std::string& location,
                                             uint32_t location_checksum,
                                             const OatDexFile* oat_dex_file,
                                             bool verify,
                                             bool verify_checksum,
                                             std::string* error_msg);

  // Opens .dex file that has been memory-mapped by the caller.
  // (Parameter was misspelled "location_checkum" in the original.)
  static std::unique_ptr<const DexFile> Open(const std::string& location,
                                             uint32_t location_checksum,
                                             std::unique_ptr<MemMap> mem_map,
                                             bool verify,
                                             bool verify_checksum,
                                             std::string* error_msg);

  // Opens all .dex files found in the file, guessing the container format based on file extension.
  static bool Open(const char* filename,
                   const std::string& location,
                   bool verify_checksum,
                   std::string* error_msg,
                   std::vector<std::unique_ptr<const DexFile>>* dex_files);

  // Open a single dex file from an fd. This function closes the fd.
  static std::unique_ptr<const DexFile> OpenDex(int fd,
                                                const std::string& location,
                                                bool verify_checksum,
                                                std::string* error_msg);

  // Opens dex files from within a .jar, .zip, or .apk file
  static bool OpenZip(int fd,
                      const std::string& location,
                      bool verify_checksum,
                      std::string* error_msg,
                      std::vector<std::unique_ptr<const DexFile>>* dex_files);

  // Return the name of the index-th classes.dex in a multidex zip file. This is classes.dex for
  // index == 0, and classes{index + 1}.dex else.
  static std::string GetMultiDexClassesDexName(size_t index);

  // Return the (possibly synthetic) dex location for a multidex entry. This is dex_location for
  // index == 0, and dex_location + multi-dex-separator + GetMultiDexClassesDexName(index) else.
  static std::string GetMultiDexLocation(size_t index, const char* dex_location);

  // Returns the canonical form of the given dex location.
  //
  // There are different flavors of "dex locations" as follows:
  // the file name of a dex file:
  //     The actual file path that the dex file has on disk.
  // dex_location:
  //     This acts as a key for the class linker to know which dex file to load.
  //     It may correspond to either an old odex file or a particular dex file
  //     inside an oat file. In the first case it will also match the file name
  //     of the dex file. In the second case (oat) it will include the file name
  //     and possibly some multidex annotation to uniquely identify it.
  // canonical_dex_location:
  //     the dex_location where it's file name part has been made canonical.
  static std::string GetDexCanonicalLocation(const char* dex_location);

  // For normal dex files, location and base location coincide. If a dex file is part of a multidex
  // archive, the base location is the name of the originating jar/apk, stripped of any internal
  // classes*.dex path.
  static std::string GetBaseLocation(const char* location) {
    const char* pos = strrchr(location, kMultiDexSeparator);
    return (pos == nullptr) ? location : std::string(location, pos - location);
  }

  static std::string GetBaseLocation(const std::string& location) {
    return GetBaseLocation(location.c_str());
  }

  // Returns the '!classes*.dex' part of the dex location. Returns an empty
  // string if there is no multidex suffix for the given location.
  // The kMultiDexSeparator is included in the returned suffix.
  static std::string GetMultiDexSuffix(const std::string& location) {
    size_t pos = location.rfind(kMultiDexSeparator);
    return (pos == std::string::npos) ? std::string() : location.substr(pos);
  }

 private:
  static std::unique_ptr<const DexFile> OpenFile(int fd,
                                                 const std::string& location,
                                                 bool verify,
                                                 bool verify_checksum,
                                                 std::string* error_msg);

  // Outcome classification for the zip open paths; lets callers distinguish
  // "entry absent" (normal end of a multidex scan) from real failures.
  enum class ZipOpenErrorCode {
    kNoError,
    kEntryNotFound,
    kExtractToMemoryError,
    kDexFileError,
    kMakeReadOnlyError,
    kVerifyError
  };

  // Open all classesXXX.dex files from a zip archive.
  static bool OpenAllDexFilesFromZip(const ZipArchive& zip_archive,
                                     const std::string& location,
                                     bool verify_checksum,
                                     std::string* error_msg,
                                     std::vector<std::unique_ptr<const DexFile>>* dex_files);

  // Opens .dex file from the entry_name in a zip archive. error_code is undefined when non-null
  // return.
  static std::unique_ptr<const DexFile> OpenOneDexFileFromZip(const ZipArchive& zip_archive,
                                                              const char* entry_name,
                                                              const std::string& location,
                                                              bool verify_checksum,
                                                              std::string* error_msg,
                                                              ZipOpenErrorCode* error_code);

  enum class VerifyResult {  // private
    kVerifyNotAttempted,
    kVerifySucceeded,
    kVerifyFailed
  };

  static std::unique_ptr<DexFile> OpenCommon(const uint8_t* base,
                                             size_t size,
                                             const std::string& location,
                                             uint32_t location_checksum,
                                             const OatDexFile* oat_dex_file,
                                             bool verify,
                                             bool verify_checksum,
                                             std::string* error_msg,
                                             VerifyResult* verify_result = nullptr);

  // Opens a .dex file at the given address, optionally backed by a MemMap
  static std::unique_ptr<const DexFile> OpenMemory(const uint8_t* dex_file,
                                                   size_t size,
                                                   const std::string& location,
                                                   uint32_t location_checksum,
                                                   std::unique_ptr<MemMap> mem_map,
                                                   const OatDexFile* oat_dex_file,
                                                   std::string* error_msg);
};

}  // namespace art

#endif  // ART_RUNTIME_DEX_FILE_LOADER_H_
"common_runtime_test.h" #include "dex_file-inl.h" +#include "dex_file_loader.h" #include "mem_map.h" #include "os.h" #include "scoped_thread_state_change-inl.h" @@ -235,7 +236,7 @@ static bool OpenDexFilesBase64(const char* base64, ScopedObjectAccess soa(Thread::Current()); static constexpr bool kVerifyChecksum = true; std::vector<std::unique_ptr<const DexFile>> tmp; - bool success = DexFile::Open(location, location, kVerifyChecksum, error_msg, &tmp); + bool success = DexFileLoader::Open(location, location, kVerifyChecksum, error_msg, &tmp); if (success) { for (std::unique_ptr<const DexFile>& dex_file : tmp) { EXPECT_EQ(PROT_READ, dex_file->GetPermissions()); @@ -274,12 +275,12 @@ static std::unique_ptr<const DexFile> OpenDexFileInMemoryBase64(const char* base /* reuse */ false, &error_message)); memcpy(region->Begin(), dex_bytes.data(), dex_bytes.size()); - std::unique_ptr<const DexFile> dex_file(DexFile::Open(location, - location_checksum, - std::move(region), - /* verify */ true, - /* verify_checksum */ true, - &error_message)); + std::unique_ptr<const DexFile> dex_file(DexFileLoader::Open(location, + location_checksum, + std::move(region), + /* verify */ true, + /* verify_checksum */ true, + &error_message)); if (expect_success) { CHECK(dex_file != nullptr) << error_message; } else { @@ -365,7 +366,7 @@ TEST_F(DexFileTest, Version40Rejected) { static constexpr bool kVerifyChecksum = true; std::string error_msg; std::vector<std::unique_ptr<const DexFile>> dex_files; - ASSERT_FALSE(DexFile::Open(location, location, kVerifyChecksum, &error_msg, &dex_files)); + ASSERT_FALSE(DexFileLoader::Open(location, location, kVerifyChecksum, &error_msg, &dex_files)); } TEST_F(DexFileTest, Version41Rejected) { @@ -377,7 +378,7 @@ TEST_F(DexFileTest, Version41Rejected) { static constexpr bool kVerifyChecksum = true; std::string error_msg; std::vector<std::unique_ptr<const DexFile>> dex_files; - ASSERT_FALSE(DexFile::Open(location, location, kVerifyChecksum, &error_msg, 
&dex_files)); + ASSERT_FALSE(DexFileLoader::Open(location, location, kVerifyChecksum, &error_msg, &dex_files)); } TEST_F(DexFileTest, ZeroLengthDexRejected) { @@ -389,7 +390,7 @@ TEST_F(DexFileTest, ZeroLengthDexRejected) { static constexpr bool kVerifyChecksum = true; std::string error_msg; std::vector<std::unique_ptr<const DexFile>> dex_files; - ASSERT_FALSE(DexFile::Open(location, location, kVerifyChecksum, &error_msg, &dex_files)); + ASSERT_FALSE(DexFileLoader::Open(location, location, kVerifyChecksum, &error_msg, &dex_files)); } TEST_F(DexFileTest, GetLocationChecksum) { @@ -402,7 +403,9 @@ TEST_F(DexFileTest, GetChecksum) { std::vector<uint32_t> checksums; ScopedObjectAccess soa(Thread::Current()); std::string error_msg; - EXPECT_TRUE(DexFile::GetMultiDexChecksums(GetLibCoreDexFileNames()[0].c_str(), &checksums, &error_msg)) + EXPECT_TRUE(DexFileLoader::GetMultiDexChecksums(GetLibCoreDexFileNames()[0].c_str(), + &checksums, + &error_msg)) << error_msg; ASSERT_EQ(1U, checksums.size()); EXPECT_EQ(java_lang_dex_file_->GetLocationChecksum(), checksums[0]); @@ -412,18 +415,18 @@ TEST_F(DexFileTest, GetMultiDexChecksums) { std::string error_msg; std::vector<uint32_t> checksums; std::string multidex_file = GetTestDexFileName("MultiDex"); - EXPECT_TRUE(DexFile::GetMultiDexChecksums(multidex_file.c_str(), - &checksums, - &error_msg)) << error_msg; + EXPECT_TRUE(DexFileLoader::GetMultiDexChecksums(multidex_file.c_str(), + &checksums, + &error_msg)) << error_msg; std::vector<std::unique_ptr<const DexFile>> dexes = OpenTestDexFiles("MultiDex"); ASSERT_EQ(2U, dexes.size()); ASSERT_EQ(2U, checksums.size()); - EXPECT_EQ(dexes[0]->GetLocation(), DexFile::GetMultiDexLocation(0, multidex_file.c_str())); + EXPECT_EQ(dexes[0]->GetLocation(), DexFileLoader::GetMultiDexLocation(0, multidex_file.c_str())); EXPECT_EQ(dexes[0]->GetLocationChecksum(), checksums[0]); - EXPECT_EQ(dexes[1]->GetLocation(), DexFile::GetMultiDexLocation(1, multidex_file.c_str())); + 
EXPECT_EQ(dexes[1]->GetLocation(), DexFileLoader::GetMultiDexLocation(1, multidex_file.c_str())); EXPECT_EQ(dexes[1]->GetLocationChecksum(), checksums[1]); } @@ -625,20 +628,20 @@ TEST_F(DexFileTest, FindFieldId) { } TEST_F(DexFileTest, GetMultiDexClassesDexName) { - ASSERT_EQ("classes.dex", DexFile::GetMultiDexClassesDexName(0)); - ASSERT_EQ("classes2.dex", DexFile::GetMultiDexClassesDexName(1)); - ASSERT_EQ("classes3.dex", DexFile::GetMultiDexClassesDexName(2)); - ASSERT_EQ("classes100.dex", DexFile::GetMultiDexClassesDexName(99)); + ASSERT_EQ("classes.dex", DexFileLoader::GetMultiDexClassesDexName(0)); + ASSERT_EQ("classes2.dex", DexFileLoader::GetMultiDexClassesDexName(1)); + ASSERT_EQ("classes3.dex", DexFileLoader::GetMultiDexClassesDexName(2)); + ASSERT_EQ("classes100.dex", DexFileLoader::GetMultiDexClassesDexName(99)); } TEST_F(DexFileTest, GetMultiDexLocation) { std::string dex_location_str = "/system/app/framework.jar"; const char* dex_location = dex_location_str.c_str(); - ASSERT_EQ("/system/app/framework.jar", DexFile::GetMultiDexLocation(0, dex_location)); + ASSERT_EQ("/system/app/framework.jar", DexFileLoader::GetMultiDexLocation(0, dex_location)); ASSERT_EQ("/system/app/framework.jar!classes2.dex", - DexFile::GetMultiDexLocation(1, dex_location)); + DexFileLoader::GetMultiDexLocation(1, dex_location)); ASSERT_EQ("/system/app/framework.jar!classes101.dex", - DexFile::GetMultiDexLocation(100, dex_location)); + DexFileLoader::GetMultiDexLocation(100, dex_location)); } TEST_F(DexFileTest, GetDexCanonicalLocation) { @@ -646,28 +649,30 @@ TEST_F(DexFileTest, GetDexCanonicalLocation) { UniqueCPtr<const char[]> dex_location_real(realpath(file.GetFilename().c_str(), nullptr)); std::string dex_location(dex_location_real.get()); - ASSERT_EQ(dex_location, DexFile::GetDexCanonicalLocation(dex_location.c_str())); - std::string multidex_location = DexFile::GetMultiDexLocation(1, dex_location.c_str()); - ASSERT_EQ(multidex_location, 
DexFile::GetDexCanonicalLocation(multidex_location.c_str())); + ASSERT_EQ(dex_location, DexFileLoader::GetDexCanonicalLocation(dex_location.c_str())); + std::string multidex_location = DexFileLoader::GetMultiDexLocation(1, dex_location.c_str()); + ASSERT_EQ(multidex_location, DexFileLoader::GetDexCanonicalLocation(multidex_location.c_str())); std::string dex_location_sym = dex_location + "symlink"; ASSERT_EQ(0, symlink(dex_location.c_str(), dex_location_sym.c_str())); - ASSERT_EQ(dex_location, DexFile::GetDexCanonicalLocation(dex_location_sym.c_str())); + ASSERT_EQ(dex_location, DexFileLoader::GetDexCanonicalLocation(dex_location_sym.c_str())); - std::string multidex_location_sym = DexFile::GetMultiDexLocation(1, dex_location_sym.c_str()); - ASSERT_EQ(multidex_location, DexFile::GetDexCanonicalLocation(multidex_location_sym.c_str())); + std::string multidex_location_sym = DexFileLoader::GetMultiDexLocation( + 1, dex_location_sym.c_str()); + ASSERT_EQ(multidex_location, + DexFileLoader::GetDexCanonicalLocation(multidex_location_sym.c_str())); ASSERT_EQ(0, unlink(dex_location_sym.c_str())); } TEST(DexFileUtilsTest, GetBaseLocationAndMultiDexSuffix) { - EXPECT_EQ("/foo/bar/baz.jar", DexFile::GetBaseLocation("/foo/bar/baz.jar")); - EXPECT_EQ("/foo/bar/baz.jar", DexFile::GetBaseLocation("/foo/bar/baz.jar!classes2.dex")); - EXPECT_EQ("/foo/bar/baz.jar", DexFile::GetBaseLocation("/foo/bar/baz.jar!classes8.dex")); - EXPECT_EQ("", DexFile::GetMultiDexSuffix("/foo/bar/baz.jar")); - EXPECT_EQ("!classes2.dex", DexFile::GetMultiDexSuffix("/foo/bar/baz.jar!classes2.dex")); - EXPECT_EQ("!classes8.dex", DexFile::GetMultiDexSuffix("/foo/bar/baz.jar!classes8.dex")); + EXPECT_EQ("/foo/bar/baz.jar", DexFileLoader::GetBaseLocation("/foo/bar/baz.jar")); + EXPECT_EQ("/foo/bar/baz.jar", DexFileLoader::GetBaseLocation("/foo/bar/baz.jar!classes2.dex")); + EXPECT_EQ("/foo/bar/baz.jar", DexFileLoader::GetBaseLocation("/foo/bar/baz.jar!classes8.dex")); + EXPECT_EQ("", 
DexFileLoader::GetMultiDexSuffix("/foo/bar/baz.jar")); + EXPECT_EQ("!classes2.dex", DexFileLoader::GetMultiDexSuffix("/foo/bar/baz.jar!classes2.dex")); + EXPECT_EQ("!classes8.dex", DexFileLoader::GetMultiDexSuffix("/foo/bar/baz.jar!classes8.dex")); } TEST_F(DexFileTest, ZipOpenClassesPresent) { diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc index 21de059797..af2d53585e 100644 --- a/runtime/dex_file_verifier_test.cc +++ b/runtime/dex_file_verifier_test.cc @@ -27,8 +27,10 @@ #include "base/unix_file/fd_file.h" #include "common_runtime_test.h" #include "dex_file-inl.h" +#include "dex_file_loader.h" #include "dex_file_types.h" #include "leb128.h" +#include "native_dex_file.h" #include "scoped_thread_state_change-inl.h" #include "thread-current-inl.h" #include "utils.h" @@ -55,7 +57,7 @@ static void FixUpChecksum(uint8_t* dex_file) { class DexFileVerifierTest : public CommonRuntimeTest { protected: DexFile* GetDexFile(const uint8_t* dex_bytes, size_t length) { - return new DexFile(dex_bytes, length, "tmp", 0, nullptr); + return new NativeDexFile(dex_bytes, length, "tmp", 0, nullptr); } void VerifyModification(const char* dex_file_base64_content, @@ -112,7 +114,7 @@ static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64, // read dex file ScopedObjectAccess soa(Thread::Current()); std::vector<std::unique_ptr<const DexFile>> tmp; - bool success = DexFile::Open(location, location, true, error_msg, &tmp); + bool success = DexFileLoader::Open(location, location, true, error_msg, &tmp); CHECK(success) << *error_msg; EXPECT_EQ(1U, tmp.size()); std::unique_ptr<const DexFile> dex_file = std::move(tmp[0]); diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc index 813a264ed9..ea7a83c75e 100644 --- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc @@ -2185,20 +2185,11 @@ extern 
"C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** // Generic JNI trampoline at this stage; instead, method's // annotations' classes are looked up in the bootstrap class // loader's resolved types (which won't trigger an exception). + CHECK(!self->IsExceptionPending()); bool critical_native = called->IsAnnotatedWithCriticalNative(); - // ArtMethod::IsAnnotatedWithCriticalNative should not throw - // an exception; clear it if it happened anyway. - // TODO: Revisit this code path and turn this into a CHECK(!self->IsExceptionPending()). - if (self->IsExceptionPending()) { - self->ClearException(); - } + CHECK(!self->IsExceptionPending()); bool fast_native = called->IsAnnotatedWithFastNative(); - // ArtMethod::IsAnnotatedWithFastNative should not throw - // an exception; clear it if it happened anyway. - // TODO: Revisit this code path and turn this into a CHECK(!self->IsExceptionPending()). - if (self->IsExceptionPending()) { - self->ClearException(); - } + CHECK(!self->IsExceptionPending()); bool normal_native = !critical_native && !fast_native; // Restore the initial ArtMethod pointer at `*sp`. *sp = called; diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc index 732c707670..f0eada3cb4 100644 --- a/runtime/gc/space/image_space.cc +++ b/runtime/gc/space/image_space.cc @@ -35,6 +35,7 @@ #include "base/stl_util.h" #include "base/systrace.h" #include "base/time_utils.h" +#include "dex_file_loader.h" #include "exec_utils.h" #include "gc/accounting/space_bitmap-inl.h" #include "image-inl.h" @@ -1829,12 +1830,12 @@ bool ImageSpace::ValidateOatFile(const OatFile& oat_file, std::string* error_msg // Skip multidex locations - These will be checked when we visit their // corresponding primary non-multidex location. 
- if (DexFile::IsMultiDexLocation(dex_file_location.c_str())) { + if (DexFileLoader::IsMultiDexLocation(dex_file_location.c_str())) { continue; } std::vector<uint32_t> checksums; - if (!DexFile::GetMultiDexChecksums(dex_file_location.c_str(), &checksums, error_msg)) { + if (!DexFileLoader::GetMultiDexChecksums(dex_file_location.c_str(), &checksums, error_msg)) { *error_msg = StringPrintf("ValidateOatFile failed to get checksums of dex file '%s' " "referenced by oat file %s: %s", dex_file_location.c_str(), @@ -1855,7 +1856,9 @@ bool ImageSpace::ValidateOatFile(const OatFile& oat_file, std::string* error_msg // Verify checksums for any related multidex entries. for (size_t i = 1; i < checksums.size(); i++) { - std::string multi_dex_location = DexFile::GetMultiDexLocation(i, dex_file_location.c_str()); + std::string multi_dex_location = DexFileLoader::GetMultiDexLocation( + i, + dex_file_location.c_str()); const OatFile::OatDexFile* multi_dex = oat_file.GetOatDexFile(multi_dex_location.c_str(), nullptr, error_msg); diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc index 2dd4db3895..2c8ec47492 100644 --- a/runtime/indirect_reference_table.cc +++ b/runtime/indirect_reference_table.cc @@ -511,7 +511,7 @@ bool IndirectReferenceTable::EnsureFreeCapacity(size_t free_capacity, std::strin return true; } -size_t IndirectReferenceTable::FreeCapacity() { +size_t IndirectReferenceTable::FreeCapacity() const { return max_entries_ - segment_state_.top_index; } diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h index 7daf01ce61..6675099523 100644 --- a/runtime/indirect_reference_table.h +++ b/runtime/indirect_reference_table.h @@ -293,7 +293,7 @@ class IndirectReferenceTable { REQUIRES_SHARED(Locks::mutator_lock_); // See implementation of EnsureFreeCapacity. We'll only state here how much is trivially free, // without recovering holes. Thus this is a conservative estimate. 
- size_t FreeCapacity() REQUIRES_SHARED(Locks::mutator_lock_); + size_t FreeCapacity() const; // Note IrtIterator does not have a read barrier as it's used to visit roots. IrtIterator begin() { diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc index 5a1605323e..73746e18ef 100644 --- a/runtime/java_vm_ext.cc +++ b/runtime/java_vm_ext.cc @@ -28,6 +28,8 @@ #include "check_jni.h" #include "dex_file-inl.h" #include "fault_handler.h" +#include "gc/allocation_record.h" +#include "gc/heap.h" #include "gc_root-inl.h" #include "indirect_reference_table-inl.h" #include "jni_internal.h" @@ -468,7 +470,11 @@ JavaVMExt::JavaVMExt(Runtime* runtime, weak_globals_add_condition_("weak globals add condition", (CHECK(Locks::jni_weak_globals_lock_ != nullptr), *Locks::jni_weak_globals_lock_)), - env_hooks_() { + env_hooks_(), + enable_allocation_tracking_delta_( + runtime_options.GetOrDefault(RuntimeArgumentMap::GlobalRefAllocStackTraceLimit)), + allocation_tracking_enabled_(false), + old_allocation_tracking_state_(false) { functions = unchecked_functions_; SetCheckJniEnabled(runtime_options.Exists(RuntimeArgumentMap::CheckJni)); } @@ -583,18 +589,55 @@ bool JavaVMExt::ShouldTrace(ArtMethod* method) { return true; } +void JavaVMExt::CheckGlobalRefAllocationTracking() { + if (LIKELY(enable_allocation_tracking_delta_ == 0)) { + return; + } + size_t simple_free_capacity = globals_.FreeCapacity(); + if (UNLIKELY(simple_free_capacity <= enable_allocation_tracking_delta_)) { + if (!allocation_tracking_enabled_) { + LOG(WARNING) << "Global reference storage appears close to exhaustion, program termination " + << "may be imminent. Enabling allocation tracking to improve abort diagnostics. " + << "This will result in program slow-down."; + + old_allocation_tracking_state_ = runtime_->GetHeap()->IsAllocTrackingEnabled(); + if (!old_allocation_tracking_state_) { + // Need to be guaranteed suspended. 
+ ScopedObjectAccess soa(Thread::Current()); + ScopedThreadSuspension sts(soa.Self(), ThreadState::kNative); + gc::AllocRecordObjectMap::SetAllocTrackingEnabled(true); + } + allocation_tracking_enabled_ = true; + } + } else { + if (UNLIKELY(allocation_tracking_enabled_)) { + if (!old_allocation_tracking_state_) { + // Need to be guaranteed suspended. + ScopedObjectAccess soa(Thread::Current()); + ScopedThreadSuspension sts(soa.Self(), ThreadState::kNative); + gc::AllocRecordObjectMap::SetAllocTrackingEnabled(false); + } + allocation_tracking_enabled_ = false; + } + } +} + jobject JavaVMExt::AddGlobalRef(Thread* self, ObjPtr<mirror::Object> obj) { // Check for null after decoding the object to handle cleared weak globals. if (obj == nullptr) { return nullptr; } - WriterMutexLock mu(self, *Locks::jni_globals_lock_); + IndirectRef ref; std::string error_msg; - IndirectRef ref = globals_.Add(kIRTFirstSegment, obj, &error_msg); + { + WriterMutexLock mu(self, *Locks::jni_globals_lock_); + ref = globals_.Add(kIRTFirstSegment, obj, &error_msg); + } if (UNLIKELY(ref == nullptr)) { LOG(FATAL) << error_msg; UNREACHABLE(); } + CheckGlobalRefAllocationTracking(); return reinterpret_cast<jobject>(ref); } @@ -625,11 +668,14 @@ void JavaVMExt::DeleteGlobalRef(Thread* self, jobject obj) { if (obj == nullptr) { return; } - WriterMutexLock mu(self, *Locks::jni_globals_lock_); - if (!globals_.Remove(kIRTFirstSegment, obj)) { - LOG(WARNING) << "JNI WARNING: DeleteGlobalRef(" << obj << ") " - << "failed to find entry"; + { + WriterMutexLock mu(self, *Locks::jni_globals_lock_); + if (!globals_.Remove(kIRTFirstSegment, obj)) { + LOG(WARNING) << "JNI WARNING: DeleteGlobalRef(" << obj << ") " + << "failed to find entry"; + } } + CheckGlobalRefAllocationTracking(); } void JavaVMExt::DeleteWeakGlobalRef(Thread* self, jweak obj) { diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h index b767b199f0..0510d6ab75 100644 --- a/runtime/java_vm_ext.h +++ b/runtime/java_vm_ext.h @@ -211,6 
+211,8 @@ class JavaVMExt : public JavaVM { REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::jni_weak_globals_lock_); + void CheckGlobalRefAllocationTracking(); + Runtime* const runtime_; // Used for testing. By default, we'll LOG(FATAL) the reason. @@ -247,6 +249,10 @@ class JavaVMExt : public JavaVM { // TODO Maybe move this to Runtime. std::vector<GetEnvHook> env_hooks_; + size_t enable_allocation_tracking_delta_; + std::atomic<bool> allocation_tracking_enabled_; + std::atomic<bool> old_allocation_tracking_state_; + DISALLOW_COPY_AND_ASSIGN(JavaVMExt); }; diff --git a/runtime/java_vm_ext_test.cc b/runtime/java_vm_ext_test.cc index 2cbfa81b91..a15ec56274 100644 --- a/runtime/java_vm_ext_test.cc +++ b/runtime/java_vm_ext_test.cc @@ -19,6 +19,7 @@ #include <pthread.h> #include "common_runtime_test.h" +#include "gc/heap.h" #include "java_vm_ext.h" #include "runtime.h" @@ -134,4 +135,49 @@ TEST_F(JavaVmExtTest, DetachCurrentThread) { EXPECT_EQ(JNI_ERR, err); } +class JavaVmExtStackTraceTest : public JavaVmExtTest { + protected: + void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE { + options->emplace_back("-XX:GlobalRefAllocStackTraceLimit=50000", nullptr); + } +}; + +TEST_F(JavaVmExtStackTraceTest, TestEnableDisable) { + ASSERT_FALSE(Runtime::Current()->GetHeap()->IsAllocTrackingEnabled()); + + JNIEnv* env; + jint ok = vm_->AttachCurrentThread(&env, nullptr); + ASSERT_EQ(JNI_OK, ok); + + std::vector<jobject> global_refs_; + jobject local_ref = env->NewStringUTF("Dummy"); + for (size_t i = 0; i < 2000; ++i) { + global_refs_.push_back(env->NewGlobalRef(local_ref)); + } + + EXPECT_TRUE(Runtime::Current()->GetHeap()->IsAllocTrackingEnabled()); + + for (jobject global_ref : global_refs_) { + env->DeleteGlobalRef(global_ref); + } + + EXPECT_FALSE(Runtime::Current()->GetHeap()->IsAllocTrackingEnabled()); + + global_refs_.clear(); + for (size_t i = 0; i < 2000; ++i) { + global_refs_.push_back(env->NewGlobalRef(local_ref)); + } + + 
EXPECT_TRUE(Runtime::Current()->GetHeap()->IsAllocTrackingEnabled()); + + for (jobject global_ref : global_refs_) { + env->DeleteGlobalRef(global_ref); + } + + EXPECT_FALSE(Runtime::Current()->GetHeap()->IsAllocTrackingEnabled()); + + ok = vm_->DetachCurrentThread(); + EXPECT_EQ(JNI_OK, ok); +} + } // namespace art diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc index e122c6da20..47615f56fe 100644 --- a/runtime/jit/jit_code_cache.cc +++ b/runtime/jit/jit_code_cache.cc @@ -26,6 +26,7 @@ #include "base/time_utils.h" #include "cha.h" #include "debugger_interface.h" +#include "dex_file_loader.h" #include "entrypoints/runtime_asm_entrypoints.h" #include "gc/accounting/bitmap-inl.h" #include "gc/scoped_gc_critical_section.h" @@ -1350,7 +1351,8 @@ void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_loca for (const ProfilingInfo* info : profiling_infos_) { ArtMethod* method = info->GetMethod(); const DexFile* dex_file = method->GetDexFile(); - if (!ContainsElement(dex_base_locations, dex_file->GetBaseLocation())) { + const std::string base_location = DexFileLoader::GetBaseLocation(dex_file->GetLocation()); + if (!ContainsElement(dex_base_locations, base_location)) { // Skip dex files which are not profiled. continue; } @@ -1404,7 +1406,8 @@ void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_loca is_missing_types = true; continue; } - if (ContainsElement(dex_base_locations, class_dex_file->GetBaseLocation())) { + if (ContainsElement(dex_base_locations, + DexFileLoader::GetBaseLocation(class_dex_file->GetLocation()))) { // Only consider classes from the same apk (including multidex). 
profile_classes.emplace_back(/*ProfileMethodInfo::ProfileClassReference*/ class_dex_file, type_index); diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc index 560131770f..1fad28d1f7 100644 --- a/runtime/jit/profile_compilation_info.cc +++ b/runtime/jit/profile_compilation_info.cc @@ -40,6 +40,7 @@ #include "base/systrace.h" #include "base/time_utils.h" #include "base/unix_file/fd_file.h" +#include "dex_file_loader.h" #include "jit/profiling_info.h" #include "os.h" #include "safe_map.h" @@ -76,20 +77,20 @@ static bool ChecksumMatch(uint32_t dex_file_checksum, uint32_t checksum) { ProfileCompilationInfo::ProfileCompilationInfo(ArenaPool* custom_arena_pool) : default_arena_pool_(), - arena_(custom_arena_pool), - info_(arena_.Adapter(kArenaAllocProfile)), - profile_key_map_(std::less<const std::string>(), arena_.Adapter(kArenaAllocProfile)) { + allocator_(custom_arena_pool), + info_(allocator_.Adapter(kArenaAllocProfile)), + profile_key_map_(std::less<const std::string>(), allocator_.Adapter(kArenaAllocProfile)) { } ProfileCompilationInfo::ProfileCompilationInfo() : default_arena_pool_(/*use_malloc*/true, /*low_4gb*/false, "ProfileCompilationInfo"), - arena_(&default_arena_pool_), - info_(arena_.Adapter(kArenaAllocProfile)), - profile_key_map_(std::less<const std::string>(), arena_.Adapter(kArenaAllocProfile)) { + allocator_(&default_arena_pool_), + info_(allocator_.Adapter(kArenaAllocProfile)), + profile_key_map_(std::less<const std::string>(), allocator_.Adapter(kArenaAllocProfile)) { } ProfileCompilationInfo::~ProfileCompilationInfo() { - VLOG(profiler) << Dumpable<MemStats>(arena_.GetMemStats()); + VLOG(profiler) << Dumpable<MemStats>(allocator_.GetMemStats()); for (DexFileData* data : info_) { delete data; } @@ -569,8 +570,8 @@ ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::GetOrAddDexFileData uint8_t profile_index = profile_index_it->second; if (info_.size() <= profile_index) { // This is a new addition. 
Add it to the info_ array. - DexFileData* dex_file_data = new (&arena_) DexFileData( - &arena_, + DexFileData* dex_file_data = new (&allocator_) DexFileData( + &allocator_, profile_key, checksum, profile_index, @@ -1537,7 +1538,7 @@ std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>* os << dex_data->profile_key; } else { // Replace the (empty) multidex suffix of the first key with a substitute for easier reading. - std::string multidex_suffix = DexFile::GetMultiDexSuffix(dex_data->profile_key); + std::string multidex_suffix = DexFileLoader::GetMultiDexSuffix(dex_data->profile_key); os << (multidex_suffix.empty() ? kFirstDexFileKeySubstitute : multidex_suffix); } os << " [index=" << static_cast<uint32_t>(dex_data->profile_index) << "]"; @@ -1696,7 +1697,7 @@ bool ProfileCompilationInfo::GenerateTestProfile(int fd, const uint16_t kFavorSplit = 2; for (uint16_t i = 0; i < number_of_dex_files; i++) { - std::string dex_location = DexFile::GetMultiDexLocation(i, base_dex_location.c_str()); + std::string dex_location = DexFileLoader::GetMultiDexLocation(i, base_dex_location.c_str()); std::string profile_key = GetProfileDexFileKey(dex_location); for (uint16_t m = 0; m < number_of_methods; m++) { @@ -1871,7 +1872,7 @@ ProfileCompilationInfo::MethodHotness ProfileCompilationInfo::DexFileData::GetHo ProfileCompilationInfo::DexPcData* ProfileCompilationInfo::FindOrAddDexPc(InlineCacheMap* inline_cache, uint32_t dex_pc) { - return &(inline_cache->FindOrAdd(dex_pc, DexPcData(&arena_))->second); + return &(inline_cache->FindOrAdd(dex_pc, DexPcData(&allocator_))->second); } std::unordered_set<std::string> ProfileCompilationInfo::GetClassDescriptors( diff --git a/runtime/jit/profile_compilation_info.h b/runtime/jit/profile_compilation_info.h index 09de29e394..8889b34610 100644 --- a/runtime/jit/profile_compilation_info.h +++ b/runtime/jit/profile_compilation_info.h @@ -133,10 +133,10 @@ class ProfileCompilationInfo { // megamorphic and its possible 
types). // If the receiver is megamorphic or is missing types the set of classes will be empty. struct DexPcData : public ArenaObject<kArenaAllocProfile> { - explicit DexPcData(ArenaAllocator* arena) + explicit DexPcData(ArenaAllocator* allocator) : is_missing_types(false), is_megamorphic(false), - classes(std::less<ClassReference>(), arena->Adapter(kArenaAllocProfile)) {} + classes(std::less<ClassReference>(), allocator->Adapter(kArenaAllocProfile)) {} void AddClass(uint16_t dex_profile_idx, const dex::TypeIndex& type_idx); void SetIsMegamorphic() { if (is_missing_types) return; @@ -405,7 +405,7 @@ class ProfileCompilationInfo { static bool Equals(const ProfileCompilationInfo::OfflineProfileMethodInfo& pmi1, const ProfileCompilationInfo::OfflineProfileMethodInfo& pmi2); - ArenaAllocator* GetArena() { return &arena_; } + ArenaAllocator* GetAllocator() { return &allocator_; } // Return all of the class descriptors in the profile for a set of dex files. std::unordered_set<std::string> GetClassDescriptors(const std::vector<const DexFile*>& dex_files); @@ -429,19 +429,19 @@ class ProfileCompilationInfo { // profile_key_map_ and info_. However, it makes the profiles logic much // simpler if we have references here as well. 
struct DexFileData : public DeletableArenaObject<kArenaAllocProfile> { - DexFileData(ArenaAllocator* arena, + DexFileData(ArenaAllocator* allocator, const std::string& key, uint32_t location_checksum, uint16_t index, uint32_t num_methods) - : arena_(arena), + : arena_(allocator), profile_key(key), profile_index(index), checksum(location_checksum), - method_map(std::less<uint16_t>(), arena->Adapter(kArenaAllocProfile)), - class_set(std::less<dex::TypeIndex>(), arena->Adapter(kArenaAllocProfile)), + method_map(std::less<uint16_t>(), allocator->Adapter(kArenaAllocProfile)), + class_set(std::less<dex::TypeIndex>(), allocator->Adapter(kArenaAllocProfile)), num_method_ids(num_methods), - bitmap_storage(arena->Adapter(kArenaAllocProfile)) { + bitmap_storage(allocator->Adapter(kArenaAllocProfile)) { const size_t num_bits = num_method_ids * kBitmapIndexCount; bitmap_storage.resize(RoundUp(num_bits, kBitsPerByte) / kBitsPerByte); if (!bitmap_storage.empty()) { @@ -698,7 +698,7 @@ class ProfileCompilationInfo { friend class Dex2oatLayoutTest; ArenaPool default_arena_pool_; - ArenaAllocator arena_; + ArenaAllocator allocator_; // Vector containing the actual profile info. 
// The vector index is the profile index of the dex data and diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc index 2bf8d8b8f8..01853de403 100644 --- a/runtime/jit/profile_saver.cc +++ b/runtime/jit/profile_saver.cc @@ -31,6 +31,7 @@ #include "base/time_utils.h" #include "class_table-inl.h" #include "compiler_filter.h" +#include "dex_file_loader.h" #include "dex_reference_collection.h" #include "gc/collector_type.h" #include "gc/gc_cause.h" @@ -414,7 +415,8 @@ void ProfileSaver::FetchAndCacheResolvedClassesAndMethods(bool startup) { const std::set<std::string>& locations = it.second; for (const auto& pair : hot_methods.GetMap()) { const DexFile* const dex_file = pair.first; - if (locations.find(dex_file->GetBaseLocation()) != locations.end()) { + const std::string base_location = DexFileLoader::GetBaseLocation(dex_file->GetLocation()); + if (locations.find(base_location) != locations.end()) { const MethodReferenceCollection::IndexVector& indices = pair.second; uint8_t flags = Hotness::kFlagHot; flags |= startup ? Hotness::kFlagStartup : Hotness::kFlagPostStartup; @@ -427,7 +429,8 @@ void ProfileSaver::FetchAndCacheResolvedClassesAndMethods(bool startup) { } for (const auto& pair : sampled_methods.GetMap()) { const DexFile* const dex_file = pair.first; - if (locations.find(dex_file->GetBaseLocation()) != locations.end()) { + const std::string base_location = DexFileLoader::GetBaseLocation(dex_file->GetLocation()); + if (locations.find(base_location) != locations.end()) { const MethodReferenceCollection::IndexVector& indices = pair.second; cached_info->AddMethodsForDex(startup ? 
Hotness::kFlagStartup : Hotness::kFlagPostStartup, dex_file, @@ -437,14 +440,15 @@ void ProfileSaver::FetchAndCacheResolvedClassesAndMethods(bool startup) { } for (const auto& pair : resolved_classes.GetMap()) { const DexFile* const dex_file = pair.first; - if (locations.find(dex_file->GetBaseLocation()) != locations.end()) { + const std::string base_location = DexFileLoader::GetBaseLocation(dex_file->GetLocation()); + if (locations.find(base_location) != locations.end()) { const TypeReferenceCollection::IndexVector& classes = pair.second; VLOG(profiler) << "Added " << classes.size() << " classes for location " - << dex_file->GetBaseLocation() + << base_location << " (" << dex_file->GetLocation() << ")"; cached_info->AddClassesForDex(dex_file, classes.begin(), classes.end()); } else { - VLOG(profiler) << "Location not found " << dex_file->GetBaseLocation() + VLOG(profiler) << "Location not found " << base_location << " (" << dex_file->GetLocation() << ")"; } } diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc index d40e6d94c9..e75d097220 100644 --- a/runtime/native/dalvik_system_DexFile.cc +++ b/runtime/native/dalvik_system_DexFile.cc @@ -26,6 +26,7 @@ #include "common_throws.h" #include "compiler_filter.h" #include "dex_file-inl.h" +#include "dex_file_loader.h" #include "jni_internal.h" #include "mirror/class_loader.h" #include "mirror/object-inl.h" @@ -185,12 +186,12 @@ static const DexFile* CreateDexFile(JNIEnv* env, std::unique_ptr<MemMap> dex_mem dex_mem_map->Begin(), dex_mem_map->End()); std::string error_message; - std::unique_ptr<const DexFile> dex_file(DexFile::Open(location, - 0, - std::move(dex_mem_map), - /* verify */ true, - /* verify_location */ true, - &error_message)); + std::unique_ptr<const DexFile> dex_file(DexFileLoader::Open(location, + 0, + std::move(dex_mem_map), + /* verify */ true, + /* verify_location */ true, + &error_message)); if (dex_file == nullptr) { ScopedObjectAccess soa(env); 
ThrowWrappedIOException("%s", error_message.c_str()); diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc index 3357fa7a45..70dd5cb56d 100644 --- a/runtime/native/dalvik_system_VMDebug.cc +++ b/runtime/native/dalvik_system_VMDebug.cc @@ -95,10 +95,10 @@ static void VMDebug_startMethodTracingDdmsImpl(JNIEnv*, jclass, jint bufferSize, } static void VMDebug_startMethodTracingFd(JNIEnv* env, jclass, jstring javaTraceFilename, - jobject javaFd, jint bufferSize, jint flags, + jint javaFd, jint bufferSize, jint flags, jboolean samplingEnabled, jint intervalUs, jboolean streamingOutput) { - int originalFd = jniGetFDFromFileDescriptor(env, javaFd); + int originalFd = javaFd; if (originalFd < 0) { return; } @@ -224,9 +224,9 @@ static jlong VMDebug_threadCpuTimeNanos(JNIEnv*, jclass) { * Cause "hprof" data to be dumped. We can throw an IOException if an * error occurs during file handling. */ -static void VMDebug_dumpHprofData(JNIEnv* env, jclass, jstring javaFilename, jobject javaFd) { +static void VMDebug_dumpHprofData(JNIEnv* env, jclass, jstring javaFilename, jint javaFd) { // Only one of these may be null. 
- if (javaFilename == nullptr && javaFd == nullptr) { + if (javaFilename == nullptr && javaFd < 0) { ScopedObjectAccess soa(env); ThrowNullPointerException("fileName == null && fd == null"); return; @@ -243,15 +243,7 @@ static void VMDebug_dumpHprofData(JNIEnv* env, jclass, jstring javaFilename, job filename = "[fd]"; } - int fd = -1; - if (javaFd != nullptr) { - fd = jniGetFDFromFileDescriptor(env, javaFd); - if (fd < 0) { - ScopedObjectAccess soa(env); - ThrowRuntimeException("Invalid file descriptor"); - return; - } - } + int fd = javaFd; hprof::DumpHeap(filename.c_str(), fd, false); } @@ -537,7 +529,7 @@ static JNINativeMethod gMethods[] = { NATIVE_METHOD(VMDebug, countInstancesOfClass, "(Ljava/lang/Class;Z)J"), NATIVE_METHOD(VMDebug, countInstancesOfClasses, "([Ljava/lang/Class;Z)[J"), NATIVE_METHOD(VMDebug, crash, "()V"), - NATIVE_METHOD(VMDebug, dumpHprofData, "(Ljava/lang/String;Ljava/io/FileDescriptor;)V"), + NATIVE_METHOD(VMDebug, dumpHprofData, "(Ljava/lang/String;I)V"), NATIVE_METHOD(VMDebug, dumpHprofDataDdms, "()V"), NATIVE_METHOD(VMDebug, dumpReferenceTables, "()V"), NATIVE_METHOD(VMDebug, getAllocCount, "(I)I"), @@ -557,7 +549,7 @@ static JNINativeMethod gMethods[] = { NATIVE_METHOD(VMDebug, startEmulatorTracing, "()V"), NATIVE_METHOD(VMDebug, startInstructionCounting, "()V"), NATIVE_METHOD(VMDebug, startMethodTracingDdmsImpl, "(IIZI)V"), - NATIVE_METHOD(VMDebug, startMethodTracingFd, "(Ljava/lang/String;Ljava/io/FileDescriptor;IIZIZ)V"), + NATIVE_METHOD(VMDebug, startMethodTracingFd, "(Ljava/lang/String;IIIZIZ)V"), NATIVE_METHOD(VMDebug, startMethodTracingFilename, "(Ljava/lang/String;IIZI)V"), NATIVE_METHOD(VMDebug, stopAllocCounting, "()V"), NATIVE_METHOD(VMDebug, stopEmulatorTracing, "()V"), diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc index 4034e8c837..413149c510 100644 --- a/runtime/native/java_lang_VMClassLoader.cc +++ b/runtime/native/java_lang_VMClassLoader.cc @@ -17,6 +17,7 @@ #include 
"java_lang_VMClassLoader.h" #include "class_linker.h" +#include "dex_file_loader.h" #include "jni_internal.h" #include "mirror/class_loader.h" #include "mirror/object-inl.h" @@ -135,7 +136,7 @@ static jobjectArray VMClassLoader_getBootClassPathEntries(JNIEnv* env, jclass) { const DexFile* dex_file = path[i]; // For multidex locations, e.g., x.jar!classes2.dex, we want to look into x.jar. - const std::string& location(dex_file->GetBaseLocation()); + const std::string location(DexFileLoader::GetBaseLocation(dex_file->GetLocation())); ScopedLocalRef<jstring> javaPath(env, env->NewStringUTF(location.c_str())); if (javaPath.get() == nullptr) { diff --git a/runtime/native_dex_file.cc b/runtime/native_dex_file.cc new file mode 100644 index 0000000000..9a93696f1b --- /dev/null +++ b/runtime/native_dex_file.cc @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "native_dex_file.h" + +namespace art { + +const uint8_t NativeDexFile::kDexMagic[] = { 'd', 'e', 'x', '\n' }; +const uint8_t NativeDexFile::kDexMagicVersions[NativeDexFile::kNumDexVersions] + [NativeDexFile::kDexVersionLen] = { + {'0', '3', '5', '\0'}, + // Dex version 036 skipped because of an old dalvik bug on some versions of android where dex + // files with that version number would erroneously be accepted and run. + {'0', '3', '7', '\0'}, + // Dex version 038: Android "O" and beyond. 
+ {'0', '3', '8', '\0'}, + // Dex version 039: Beyond Android "O". + {'0', '3', '9', '\0'}, +}; + +bool NativeDexFile::IsMagicValid(const uint8_t* magic) { + return (memcmp(magic, kDexMagic, sizeof(kDexMagic)) == 0); +} + +bool NativeDexFile::IsVersionValid(const uint8_t* magic) { + const uint8_t* version = &magic[sizeof(kDexMagic)]; + for (uint32_t i = 0; i < kNumDexVersions; i++) { + if (memcmp(version, kDexMagicVersions[i], kDexVersionLen) == 0) { + return true; + } + } + return false; +} + +bool NativeDexFile::IsMagicValid() const { + return IsMagicValid(header_->magic_); +} + +bool NativeDexFile::IsVersionValid() const { + return IsVersionValid(header_->magic_); +} + +} // namespace art diff --git a/runtime/native_dex_file.h b/runtime/native_dex_file.h new file mode 100644 index 0000000000..8f09e6d7fc --- /dev/null +++ b/runtime/native_dex_file.h @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_NATIVE_DEX_FILE_H_ +#define ART_RUNTIME_NATIVE_DEX_FILE_H_ + +#include <iosfwd> + +#include "dex_file.h" + +namespace art { + +class OatDexFile; + +// Native (ordinary) dex file. This is the format that is packaged in APKs and produced by tools. 
+class NativeDexFile : public DexFile { + public: + static const uint8_t kDexMagic[kDexMagicSize]; + static constexpr size_t kNumDexVersions = 4; + static const uint8_t kDexMagicVersions[kNumDexVersions][kDexVersionLen]; + + // Returns true if the byte string points to the magic value. + static bool IsMagicValid(const uint8_t* magic); + virtual bool IsMagicValid() const OVERRIDE; + + // Returns true if the byte string after the magic is the correct value. + static bool IsVersionValid(const uint8_t* magic); + virtual bool IsVersionValid() const OVERRIDE; + + private: + NativeDexFile(const uint8_t* base, + size_t size, + const std::string& location, + uint32_t location_checksum, + const OatDexFile* oat_dex_file) + : DexFile(base, size, location, location_checksum, oat_dex_file) {} + + friend class DexFileLoader; + friend class DexFileVerifierTest; + + ART_FRIEND_TEST(ClassLinkerTest, RegisterDexFileName); // for constructor + + DISALLOW_COPY_AND_ASSIGN(NativeDexFile); +}; + +} // namespace art + +#endif // ART_RUNTIME_NATIVE_DEX_FILE_H_ diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc index 734e700c81..ab820fb37f 100644 --- a/runtime/oat_file.cc +++ b/runtime/oat_file.cc @@ -42,6 +42,7 @@ #include "base/systrace.h" #include "base/unix_file/fd_file.h" #include "dex_file_types.h" +#include "dex_file_loader.h" #include "elf_file.h" #include "elf_utils.h" #include "gc_root.h" @@ -49,6 +50,7 @@ #include "mem_map.h" #include "mirror/class.h" #include "mirror/object-inl.h" +#include "native_dex_file.h" #include "oat.h" #include "oat_file-inl.h" #include "oat_file_manager.h" @@ -552,7 +554,9 @@ bool OatFileBase::Setup(const char* abs_dex_location, std::string* error_msg) { } const uint8_t* dex_file_pointer = DexBegin() + dex_file_offset; - if (UNLIKELY(!DexFile::IsMagicValid(dex_file_pointer))) { + + const bool valid_magic = NativeDexFile::IsMagicValid(dex_file_pointer); + if (UNLIKELY(!valid_magic)) { *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu 
for '%s' with invalid " "dex file magic '%s'", GetLocation().c_str(), @@ -561,7 +565,7 @@ bool OatFileBase::Setup(const char* abs_dex_location, std::string* error_msg) { dex_file_pointer); return false; } - if (UNLIKELY(!DexFile::IsVersionValid(dex_file_pointer))) { + if (UNLIKELY(!NativeDexFile::IsVersionValid(dex_file_pointer))) { *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with invalid " "dex file version '%s'", GetLocation().c_str(), @@ -705,7 +709,8 @@ bool OatFileBase::Setup(const char* abs_dex_location, std::string* error_msg) { reinterpret_cast<const DexFile::Header*>(dex_file_pointer)->method_ids_size_); } - std::string canonical_location = DexFile::GetDexCanonicalLocation(dex_file_location.c_str()); + std::string canonical_location = + DexFileLoader::GetDexCanonicalLocation(dex_file_location.c_str()); // Create the OatDexFile and add it to the owning container. OatDexFile* oat_dex_file = new OatDexFile(this, @@ -1226,8 +1231,8 @@ std::string OatFile::ResolveRelativeEncodedDexLocation( const char* abs_dex_location, const std::string& rel_dex_location) { if (abs_dex_location != nullptr && rel_dex_location[0] != '/') { // Strip :classes<N>.dex used for secondary multidex files. - std::string base = DexFile::GetBaseLocation(rel_dex_location); - std::string multidex_suffix = DexFile::GetMultiDexSuffix(rel_dex_location); + std::string base = DexFileLoader::GetBaseLocation(rel_dex_location); + std::string multidex_suffix = DexFileLoader::GetMultiDexSuffix(rel_dex_location); // Check if the base is a suffix of the provided abs_dex_location. std::string target_suffix = "/" + base; @@ -1481,7 +1486,7 @@ const OatFile::OatDexFile* OatFile::GetOatDexFile(const char* dex_location, oat_dex_file = secondary_lb->second; // May be null. } else { // We haven't seen this dex_location before, we must check the canonical location. 
- std::string dex_canonical_location = DexFile::GetDexCanonicalLocation(dex_location); + std::string dex_canonical_location = DexFileLoader::GetDexCanonicalLocation(dex_location); if (dex_canonical_location != dex_location) { StringPiece canonical_key(dex_canonical_location); auto canonical_it = oat_dex_files_.find(canonical_key); @@ -1499,7 +1504,7 @@ const OatFile::OatDexFile* OatFile::GetOatDexFile(const char* dex_location, if (oat_dex_file == nullptr) { if (error_msg != nullptr) { - std::string dex_canonical_location = DexFile::GetDexCanonicalLocation(dex_location); + std::string dex_canonical_location = DexFileLoader::GetDexCanonicalLocation(dex_location); *error_msg = "Failed to find OatDexFile for DexFile " + std::string(dex_location) + " (canonical path " + dex_canonical_location + ") in OatFile " + GetLocation(); } @@ -1509,7 +1514,7 @@ const OatFile::OatDexFile* OatFile::GetOatDexFile(const char* dex_location, if (dex_location_checksum != nullptr && oat_dex_file->GetDexFileLocationChecksum() != *dex_location_checksum) { if (error_msg != nullptr) { - std::string dex_canonical_location = DexFile::GetDexCanonicalLocation(dex_location); + std::string dex_canonical_location = DexFileLoader::GetDexCanonicalLocation(dex_location); std::string checksum = StringPrintf("0x%08x", oat_dex_file->GetDexFileLocationChecksum()); std::string required_checksum = StringPrintf("0x%08x", *dex_location_checksum); *error_msg = "OatDexFile for DexFile " + std::string(dex_location) @@ -1565,14 +1570,14 @@ std::unique_ptr<const DexFile> OatFile::OatDexFile::OpenDexFile(std::string* err ScopedTrace trace(__PRETTY_FUNCTION__); static constexpr bool kVerify = false; static constexpr bool kVerifyChecksum = false; - return DexFile::Open(dex_file_pointer_, - FileSize(), - dex_file_location_, - dex_file_location_checksum_, - this, - kVerify, - kVerifyChecksum, - error_msg); + return DexFileLoader::Open(dex_file_pointer_, + FileSize(), + dex_file_location_, + dex_file_location_checksum_, 
+ this, + kVerify, + kVerifyChecksum, + error_msg); } uint32_t OatFile::OatDexFile::GetOatClassOffset(uint16_t class_def_index) const { diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc index e3c4cffaa8..a7fe9b1205 100644 --- a/runtime/oat_file_assistant.cc +++ b/runtime/oat_file_assistant.cc @@ -27,6 +27,7 @@ #include "base/stl_util.h" #include "class_linker.h" #include "compiler_filter.h" +#include "dex_file_loader.h" #include "exec_utils.h" #include "gc/heap.h" #include "gc/space/image_space.h" @@ -351,7 +352,7 @@ bool OatFileAssistant::LoadDexFiles( // Load the rest of the multidex entries for (size_t i = 1;; i++) { - std::string multidex_dex_location = DexFile::GetMultiDexLocation(i, dex_location.c_str()); + std::string multidex_dex_location = DexFileLoader::GetMultiDexLocation(i, dex_location.c_str()); oat_dex_file = oat_file.GetOatDexFile(multidex_dex_location.c_str(), nullptr); if (oat_dex_file == nullptr) { // There are no more multidex entries to load. @@ -403,7 +404,7 @@ bool OatFileAssistant::DexChecksumUpToDate(const VdexFile& file, std::string* er uint32_t expected_checksum = (*required_dex_checksums)[i]; uint32_t actual_checksum = file.GetLocationChecksum(i); if (expected_checksum != actual_checksum) { - std::string dex = DexFile::GetMultiDexLocation(i, dex_location_.c_str()); + std::string dex = DexFileLoader::GetMultiDexLocation(i, dex_location_.c_str()); *error_msg = StringPrintf("Dex checksum does not match for dex: %s." 
"Expected: %u, actual: %u", dex.c_str(), @@ -432,7 +433,7 @@ bool OatFileAssistant::DexChecksumUpToDate(const OatFile& file, std::string* err } for (uint32_t i = 0; i < number_of_dex_files; i++) { - std::string dex = DexFile::GetMultiDexLocation(i, dex_location_.c_str()); + std::string dex = DexFileLoader::GetMultiDexLocation(i, dex_location_.c_str()); uint32_t expected_checksum = (*required_dex_checksums)[i]; const OatFile::OatDexFile* oat_dex_file = file.GetOatDexFile(dex.c_str(), nullptr); if (oat_dex_file == nullptr) { @@ -865,9 +866,9 @@ const std::vector<uint32_t>* OatFileAssistant::GetRequiredDexChecksums() { required_dex_checksums_found_ = false; cached_required_dex_checksums_.clear(); std::string error_msg; - if (DexFile::GetMultiDexChecksums(dex_location_.c_str(), - &cached_required_dex_checksums_, - &error_msg)) { + if (DexFileLoader::GetMultiDexChecksums(dex_location_.c_str(), + &cached_required_dex_checksums_, + &error_msg)) { required_dex_checksums_found_ = true; has_original_dex_files_ = true; } else { @@ -881,7 +882,7 @@ const std::vector<uint32_t>* OatFileAssistant::GetRequiredDexChecksums() { if (odex_file != nullptr) { required_dex_checksums_found_ = true; for (size_t i = 0; i < odex_file->GetOatHeader().GetDexFileCount(); i++) { - std::string dex = DexFile::GetMultiDexLocation(i, dex_location_.c_str()); + std::string dex = DexFileLoader::GetMultiDexLocation(i, dex_location_.c_str()); const OatFile::OatDexFile* odex_dex_file = odex_file->GetOatDexFile(dex.c_str(), nullptr); if (odex_dex_file == nullptr) { required_dex_checksums_found_ = false; diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc index 7cabae55e4..1e7cf723dc 100644 --- a/runtime/oat_file_manager.cc +++ b/runtime/oat_file_manager.cc @@ -31,6 +31,7 @@ #include "class_linker.h" #include "class_loader_context.h" #include "dex_file-inl.h" +#include "dex_file_loader.h" #include "dex_file_tracking_registrar.h" #include "gc/scoped_gc_critical_section.h" #include 
"gc/space/image_space.h" @@ -94,7 +95,7 @@ const OatFile* OatFileManager::FindOpenedOatFileFromDexLocation( for (const std::unique_ptr<const OatFile>& oat_file : oat_files_) { const std::vector<const OatDexFile*>& oat_dex_files = oat_file->GetOatDexFiles(); for (const OatDexFile* oat_dex_file : oat_dex_files) { - if (DexFile::GetBaseLocation(oat_dex_file->GetDexFileLocation()) == dex_base_location) { + if (DexFileLoader::GetBaseLocation(oat_dex_file->GetDexFileLocation()) == dex_base_location) { return oat_file.get(); } } @@ -435,8 +436,13 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat( // Update the oat file on disk if we can, based on the --compiler-filter // option derived from the current runtime options. // This may fail, but that's okay. Best effort is all that matters here. - switch (oat_file_assistant.MakeUpToDate(/*profile_changed*/false, - context.get(), + // TODO(calin): b/64530081 b/66984396. Pass a null context to verify and compile + // secondary dex files in isolation (and avoid to extract/verify the main apk + // if it's in the class path). Note this trades correctness for performance + // since the resulting slow down is unacceptable in some cases until b/64530081 + // is fixed. 
+ switch (oat_file_assistant.MakeUpToDate(/*profile_changed*/ false, + /*class_loader_context*/ nullptr, /*out*/ &error_msg)) { case OatFileAssistant::kUpdateFailed: LOG(WARNING) << error_msg; @@ -591,7 +597,7 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat( if (oat_file_assistant.HasOriginalDexFiles()) { if (Runtime::Current()->IsDexFileFallbackEnabled()) { static constexpr bool kVerifyChecksum = true; - if (!DexFile::Open( + if (!DexFileLoader::Open( dex_location, dex_location, kVerifyChecksum, /*out*/ &error_msg, &dex_files)) { LOG(WARNING) << error_msg; error_msgs->push_back("Failed to open dex files from " + std::string(dex_location) diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc index 1d524fd5e6..9888186ed0 100644 --- a/runtime/parsed_options.cc +++ b/runtime/parsed_options.cc @@ -310,6 +310,9 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize .Define("-XX:ThreadSuspendTimeout=_") // in ms .WithType<MillisecondsToNanoseconds>() // store as ns .IntoKey(M::ThreadSuspendTimeout) + .Define("-XX:GlobalRefAllocStackTraceLimit=_") // Number of free slots to enable tracing. 
+ .WithType<unsigned int>() + .IntoKey(M::GlobalRefAllocStackTraceLimit) .Define("-XX:SlowDebug=_") .WithType<bool>() .WithValueMap({{"false", false}, {"true", true}}) diff --git a/runtime/runtime.cc b/runtime/runtime.cc index 7c05cb6174..a4ed21e450 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -67,6 +67,7 @@ #include "class_linker-inl.h" #include "compiler_callbacks.h" #include "debugger.h" +#include "dex_file_loader.h" #include "elf_file.h" #include "entrypoints/runtime_asm_entrypoints.h" #include "experimental_flags.h" @@ -1020,7 +1021,7 @@ static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames, LOG(WARNING) << "Skipping non-existent dex file '" << dex_filename << "'"; continue; } - if (!DexFile::Open(dex_filename, dex_location, kVerifyChecksum, &error_msg, dex_files)) { + if (!DexFileLoader::Open(dex_filename, dex_location, kVerifyChecksum, &error_msg, dex_files)) { LOG(WARNING) << "Failed to open .dex from file '" << dex_filename << "': " << error_msg; ++failure_count; } diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def index 78a60faa3a..cafae22e8c 100644 --- a/runtime/runtime_options.def +++ b/runtime/runtime_options.def @@ -145,4 +145,6 @@ RUNTIME_OPTIONS_KEY (void (*)(), HookAbort, nullpt RUNTIME_OPTIONS_KEY (bool, SlowDebug, false) +RUNTIME_OPTIONS_KEY (unsigned int, GlobalRefAllocStackTraceLimit, 0) // 0 = off + #undef RUNTIME_OPTIONS_KEY diff --git a/runtime/utils.cc b/runtime/utils.cc index b72dec62bd..1f6bd742b6 100644 --- a/runtime/utils.cc +++ b/runtime/utils.cc @@ -48,6 +48,7 @@ #include "base/stl_util.h" #include "base/unix_file/fd_file.h" #include "dex_file-inl.h" +#include "dex_file_loader.h" #include "dex_instruction.h" #include "oat_quick_method_header.h" #include "os.h" @@ -858,7 +859,7 @@ bool GetDalvikCacheFilename(const char* location, const char* cache_location, !android::base::EndsWith(location, ".art") && !android::base::EndsWith(location, ".oat")) { cache_file += "/"; - cache_file += 
DexFile::kClassesDex; + cache_file += DexFileLoader::kClassesDex; } std::replace(cache_file.begin(), cache_file.end(), '/', '@'); *filename = StringPrintf("%s/%s", cache_location, cache_file.c_str()); diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc index b95522062e..55bc9ecac5 100644 --- a/runtime/vdex_file.cc +++ b/runtime/vdex_file.cc @@ -25,6 +25,7 @@ #include "base/stl_util.h" #include "base/unix_file/fd_file.h" #include "dex_file.h" +#include "dex_file_loader.h" #include "dex_to_dex_decompiler.h" namespace art { @@ -151,15 +152,15 @@ bool VdexFile::OpenAllDexFiles(std::vector<std::unique_ptr<const DexFile>>* dex_ size_t size = reinterpret_cast<const DexFile::Header*>(dex_file_start)->file_size_; // TODO: Supply the location information for a vdex file. static constexpr char kVdexLocation[] = ""; - std::string location = DexFile::GetMultiDexLocation(i, kVdexLocation); - std::unique_ptr<const DexFile> dex(DexFile::Open(dex_file_start, - size, - location, - GetLocationChecksum(i), - nullptr /*oat_dex_file*/, - false /*verify*/, - false /*verify_checksum*/, - error_msg)); + std::string location = DexFileLoader::GetMultiDexLocation(i, kVdexLocation); + std::unique_ptr<const DexFile> dex(DexFileLoader::Open(dex_file_start, + size, + location, + GetLocationChecksum(i), + nullptr /*oat_dex_file*/, + false /*verify*/, + false /*verify_checksum*/, + error_msg)); if (dex == nullptr) { return false; } diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h index 8afbe78c68..57ab56cf27 100644 --- a/runtime/verifier/method_verifier.h +++ b/runtime/verifier/method_verifier.h @@ -221,7 +221,7 @@ class MethodVerifier { return IsConstructor() && !IsStatic(); } - ScopedArenaAllocator& GetArena() { + ScopedArenaAllocator& GetScopedAllocator() { return arena_; } diff --git a/runtime/verifier/reg_type-inl.h b/runtime/verifier/reg_type-inl.h index 704d2a86af..631c6bd7ef 100644 --- a/runtime/verifier/reg_type-inl.h +++ 
b/runtime/verifier/reg_type-inl.h @@ -199,8 +199,8 @@ inline const UndefinedType* UndefinedType::GetInstance() { return instance_; } -inline void* RegType::operator new(size_t size, ScopedArenaAllocator* arena) { - return arena->Alloc(size, kArenaAllocMisc); +inline void* RegType::operator new(size_t size, ScopedArenaAllocator* allocator) { + return allocator->Alloc(size, kArenaAllocMisc); } } // namespace verifier diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h index c5d8ff5131..a2085a3f09 100644 --- a/runtime/verifier/reg_type.h +++ b/runtime/verifier/reg_type.h @@ -264,8 +264,8 @@ class RegType { return ::operator new(size); } - static void* operator new(size_t size, ArenaAllocator* arena) = delete; - static void* operator new(size_t size, ScopedArenaAllocator* arena); + static void* operator new(size_t size, ArenaAllocator* allocator) = delete; + static void* operator new(size_t size, ScopedArenaAllocator* allocator); enum class AssignmentType { kBoolean, diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h index 3da1680c80..a9c9428581 100644 --- a/runtime/verifier/register_line-inl.h +++ b/runtime/verifier/register_line-inl.h @@ -178,14 +178,15 @@ inline size_t RegisterLine::ComputeSize(size_t num_regs) { } inline RegisterLine* RegisterLine::Create(size_t num_regs, MethodVerifier* verifier) { - void* memory = verifier->GetArena().Alloc(ComputeSize(num_regs)); + void* memory = verifier->GetScopedAllocator().Alloc(ComputeSize(num_regs)); return new (memory) RegisterLine(num_regs, verifier); } inline RegisterLine::RegisterLine(size_t num_regs, MethodVerifier* verifier) : num_regs_(num_regs), - monitors_(verifier->GetArena().Adapter(kArenaAllocVerifier)), - reg_to_lock_depths_(std::less<uint32_t>(), verifier->GetArena().Adapter(kArenaAllocVerifier)), + monitors_(verifier->GetScopedAllocator().Adapter(kArenaAllocVerifier)), + reg_to_lock_depths_(std::less<uint32_t>(), + 
verifier->GetScopedAllocator().Adapter(kArenaAllocVerifier)), this_initialized_(false) { std::uninitialized_fill_n(line_, num_regs_, 0u); SetResultTypeToUnknown(verifier); diff --git a/test/1914-get-local-instance/expected.txt b/test/1914-get-local-instance/expected.txt index 4117942392..09f0df1937 100644 --- a/test/1914-get-local-instance/expected.txt +++ b/test/1914-get-local-instance/expected.txt @@ -10,3 +10,6 @@ Running public void art.Test1914$TargetClass.InstanceMethod(java.lang.Runnable) Running public native void art.Test1914$TargetClass.NativeInstanceMethod(java.lang.Runnable) with "GetThis" on remote thread. "GetThis" on public native void art.Test1914$TargetClass.NativeInstanceMethod(java.lang.Runnable) got value: TargetClass("NativeInstanceMethodObject") Value is 'TargetClass("NativeInstanceMethodObject")' (class: class art.Test1914$TargetClass) +Running public abstract void art.Test1914$Foo.InterfaceProxyMethod(java.lang.Runnable) with "GetThis" on remote thread. +"GetThis" on public abstract void art.Test1914$Foo.InterfaceProxyMethod(java.lang.Runnable) got value: Proxy for [interface art.Test1914$Foo] + Value is 'Proxy for [interface art.Test1914$Foo]' (class: PROXY CLASS) diff --git a/test/1914-get-local-instance/src/art/Test1914.java b/test/1914-get-local-instance/src/art/Test1914.java index c09f519db8..e47f9cbf38 100644 --- a/test/1914-get-local-instance/src/art/Test1914.java +++ b/test/1914-get-local-instance/src/art/Test1914.java @@ -18,7 +18,9 @@ package art; import java.lang.reflect.Constructor; import java.lang.reflect.Executable; +import java.lang.reflect.InvocationHandler; import java.lang.reflect.Method; +import java.lang.reflect.Proxy; import java.nio.ByteBuffer; import java.util.concurrent.Semaphore; import java.util.Arrays; @@ -35,7 +37,7 @@ public class Test1914 { public static void reportValue(Object val) { System.out.println("\tValue is '" + val + "' (class: " - + (val != null ? val.getClass() : "NULL") + ")"); + + (val != null ? 
(val instanceof Proxy ? "PROXY CLASS" : val.getClass()) : "NULL") + ")"); } public static void StaticMethod(Runnable safepoint) { @@ -151,7 +153,10 @@ public class Test1914 { private StackTrace.StackFrameData findStackFrame(Thread thr) { for (StackTrace.StackFrameData frame : StackTrace.GetStackTrace(thr)) { - if (frame.method.equals(target)) { + if (frame.method.equals(target) || + (frame.method.getName().equals(target.getName()) && + Arrays.deepEquals(frame.method.getParameterTypes(), target.getParameterTypes()) && + ((Method)frame.method).getReturnType().equals(target.getReturnType()))) { return frame; } } @@ -163,6 +168,25 @@ public class Test1914 { return klass.getDeclaredMethod(name, Runnable.class); } + public static interface Foo { + public void InterfaceProxyMethod(Runnable r); + } + + public static Object getProxyObject(final Class... k) { + return Proxy.newProxyInstance( + Test1914.class.getClassLoader(), + k, + (p, m, a) -> { + if (m.getName().equals("toString")) { + return "Proxy for " + Arrays.toString(k); + } else { + ((Runnable)a[0]).run(); + reportValue(p); + return null; + } + }); + } + public static void run() throws Exception { Locals.EnableLocalVariableAccess(); final TestCase[] MAIN_TEST_CASES = new TestCase[] { @@ -172,6 +196,8 @@ public class Test1914 { getMethod(TargetClass.class, "InstanceMethod")), new TestCase(new TargetClass("NativeInstanceMethodObject"), getMethod(TargetClass.class, "NativeInstanceMethod")), + new TestCase(getProxyObject(Foo.class), + getMethod(Foo.class, "InterfaceProxyMethod")), }; for (TestCase t: MAIN_TEST_CASES) { diff --git a/test/1929-exception-catch-exception/build b/test/1929-exception-catch-exception/build index 42b99ad9f8..10ffcc537d 100644 --- a/test/1929-exception-catch-exception/build +++ b/test/1929-exception-catch-exception/build @@ -15,6 +15,6 @@ # limitations under the License. 
# See b/65168732 -export DX=$ANDROID_HOST_OUT/bin/dx +export USE_D8=false ./default-build "$@" diff --git a/test/1937-transform-soft-fail/check b/test/1937-transform-soft-fail/check new file mode 100755 index 0000000000..7cee530291 --- /dev/null +++ b/test/1937-transform-soft-fail/check @@ -0,0 +1,19 @@ +#!/bin/bash +# +# Copyright (C) 2017 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +sed -e 's/:.*$//' "$2" > "$2.tmp" + +./default-check "$1" "$2.tmp" diff --git a/test/1937-transform-soft-fail/expected.txt b/test/1937-transform-soft-fail/expected.txt new file mode 100644 index 0000000000..f0f6ac80fc --- /dev/null +++ b/test/1937-transform-soft-fail/expected.txt @@ -0,0 +1,3 @@ +hello +throwing +Caught exception java.lang.NoSuchMethodError diff --git a/test/1937-transform-soft-fail/info.txt b/test/1937-transform-soft-fail/info.txt new file mode 100644 index 0000000000..875a5f6ec1 --- /dev/null +++ b/test/1937-transform-soft-fail/info.txt @@ -0,0 +1 @@ +Tests basic functions in the jvmti plugin. diff --git a/test/1937-transform-soft-fail/run b/test/1937-transform-soft-fail/run new file mode 100755 index 0000000000..c6e62ae6cd --- /dev/null +++ b/test/1937-transform-soft-fail/run @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +./default-run "$@" --jvmti diff --git a/test/1937-transform-soft-fail/src/Main.java b/test/1937-transform-soft-fail/src/Main.java new file mode 100644 index 0000000000..e3541b3b9d --- /dev/null +++ b/test/1937-transform-soft-fail/src/Main.java @@ -0,0 +1,21 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public class Main { + public static void main(String[] args) throws Exception { + art.Test1937.run(); + } +} diff --git a/test/1937-transform-soft-fail/src/art/Redefinition.java b/test/1937-transform-soft-fail/src/art/Redefinition.java new file mode 100644 index 0000000000..56d2938a01 --- /dev/null +++ b/test/1937-transform-soft-fail/src/art/Redefinition.java @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package art; + +import java.util.ArrayList; +// Common Redefinition functions. Placed here for use by CTS +public class Redefinition { + public static final class CommonClassDefinition { + public final Class<?> target; + public final byte[] class_file_bytes; + public final byte[] dex_file_bytes; + + public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) { + this.target = target; + this.class_file_bytes = class_file_bytes; + this.dex_file_bytes = dex_file_bytes; + } + } + + // A set of possible test configurations. Test should set this if they need to. + // This must be kept in sync with the defines in ti-agent/common_helper.cc + public static enum Config { + COMMON_REDEFINE(0), + COMMON_RETRANSFORM(1), + COMMON_TRANSFORM(2); + + private final int val; + private Config(int val) { + this.val = val; + } + } + + public static void setTestConfiguration(Config type) { + nativeSetTestConfiguration(type.val); + } + + private static native void nativeSetTestConfiguration(int type); + + // Transforms the class + public static native void doCommonClassRedefinition(Class<?> target, + byte[] classfile, + byte[] dexfile); + + public static void doMultiClassRedefinition(CommonClassDefinition... 
defs) { + ArrayList<Class<?>> classes = new ArrayList<>(); + ArrayList<byte[]> class_files = new ArrayList<>(); + ArrayList<byte[]> dex_files = new ArrayList<>(); + + for (CommonClassDefinition d : defs) { + classes.add(d.target); + class_files.add(d.class_file_bytes); + dex_files.add(d.dex_file_bytes); + } + doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]), + class_files.toArray(new byte[0][]), + dex_files.toArray(new byte[0][])); + } + + public static void addMultiTransformationResults(CommonClassDefinition... defs) { + for (CommonClassDefinition d : defs) { + addCommonTransformationResult(d.target.getCanonicalName(), + d.class_file_bytes, + d.dex_file_bytes); + } + } + + public static native void doCommonMultiClassRedefinition(Class<?>[] targets, + byte[][] classfiles, + byte[][] dexfiles); + public static native void doCommonClassRetransformation(Class<?>... target); + public static native void setPopRetransformations(boolean pop); + public static native void popTransformationFor(String name); + public static native void enableCommonRetransformation(boolean enable); + public static native void addCommonTransformationResult(String target_name, + byte[] class_bytes, + byte[] dex_bytes); +} diff --git a/test/1937-transform-soft-fail/src/art/Test1937.java b/test/1937-transform-soft-fail/src/art/Test1937.java new file mode 100644 index 0000000000..7255a5ef96 --- /dev/null +++ b/test/1937-transform-soft-fail/src/art/Test1937.java @@ -0,0 +1,89 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package art; + +import java.util.Base64; +public class Test1937 { + + static class Transform { + public void sayHi() { + // Use lower 'h' to make sure the string will have a different string id + // than the transformation (the transformation code is the same except + // the actual printed String, which was making the test inacurately passing + // in JIT mode when loading the string from the dex cache, as the string ids + // of the two different strings were the same). + // We know the string ids will be different because lexicographically: + // "Goodbye" < "LTransform;" < "hello". 
+ System.out.println("hello"); + } + } + + /** + * base64 encoded class/dex file for + * class Transform { + * public void sayHi() { + * System.out.println("throwing"); + * Redefinition.notPresent(); + * } + * } + */ + private static final byte[] CLASS_BYTES = Base64.getDecoder().decode( + "yv66vgAAADQAJQoABwAPCQAQABEIABIKABMAFAoAFQAWBwAYBwAbAQAGPGluaXQ+AQADKClWAQAE" + + "Q29kZQEAD0xpbmVOdW1iZXJUYWJsZQEABXNheUhpAQAKU291cmNlRmlsZQEADVRlc3QxOTM3Lmph" + + "dmEMAAgACQcAHAwAHQAeAQAIdGhyb3dpbmcHAB8MACAAIQcAIgwAIwAJBwAkAQAWYXJ0L1Rlc3Qx" + + "OTM3JFRyYW5zZm9ybQEACVRyYW5zZm9ybQEADElubmVyQ2xhc3NlcwEAEGphdmEvbGFuZy9PYmpl" + + "Y3QBABBqYXZhL2xhbmcvU3lzdGVtAQADb3V0AQAVTGphdmEvaW8vUHJpbnRTdHJlYW07AQATamF2" + + "YS9pby9QcmludFN0cmVhbQEAB3ByaW50bG4BABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBABBhcnQv" + + "UmVkZWZpbml0aW9uAQAKbm90UHJlc2VudAEADGFydC9UZXN0MTkzNwAgAAYABwAAAAAAAgAAAAgA" + + "CQABAAoAAAAdAAEAAQAAAAUqtwABsQAAAAEACwAAAAYAAQAAACMAAQAMAAkAAQAKAAAALAACAAEA" + + "AAAMsgACEgO2AAS4AAWxAAAAAQALAAAADgADAAAAJQAIACYACwAnAAIADQAAAAIADgAaAAAACgAB" + + "AAYAFwAZAAg="); + private static final byte[] DEX_BYTES = Base64.getDecoder().decode( + "ZGV4CjAzNQDfmxvwUHv7EEBCvzjdM/uAviWG8eIsKIbsAwAAcAAAAHhWNBIAAAAAAAAAACgDAAAW" + + "AAAAcAAAAAoAAADIAAAAAgAAAPAAAAABAAAACAEAAAUAAAAQAQAAAQAAADgBAACUAgAAWAEAALoB" + + "AADCAQAA1gEAAPABAAAAAgAAJAIAAEQCAABbAgAAbwIAAIMCAACXAgAApgIAALECAAC0AgAAuAIA" + + "AMUCAADLAgAA1wIAANwCAADlAgAA7AIAAPYCAAABAAAAAgAAAAMAAAAEAAAABQAAAAYAAAAHAAAA" + + "CAAAAAkAAAAMAAAADAAAAAkAAAAAAAAADQAAAAkAAAC0AQAACAAFABEAAAAAAAAAEAAAAAEAAAAA" + + "AAAAAQAAABMAAAAFAAEAEgAAAAYAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAoAAACkAQAAGAMAAAAA" + + "AAACAAAACQMAAA8DAAABAAEAAQAAAP0CAAAEAAAAcBAEAAAADgADAAEAAgAAAAIDAAALAAAAYgAA" + + "ABoBFABuIAMAEABxAAAAAAAOAAAAWAEAAAAAAAAAAAAAAAAAAAEAAAAHAAY8aW5pdD4AEkxhcnQv" + + "UmVkZWZpbml0aW9uOwAYTGFydC9UZXN0MTkzNyRUcmFuc2Zvcm07AA5MYXJ0L1Rlc3QxOTM3OwAi" + + "TGRhbHZpay9hbm5vdGF0aW9uL0VuY2xvc2luZ0NsYXNzOwAeTGRhbHZpay9hbm5vdGF0aW9uL0lu" + + 
"bmVyQ2xhc3M7ABVMamF2YS9pby9QcmludFN0cmVhbTsAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGph" + + "dmEvbGFuZy9TdHJpbmc7ABJMamF2YS9sYW5nL1N5c3RlbTsADVRlc3QxOTM3LmphdmEACVRyYW5z" + + "Zm9ybQABVgACVkwAC2FjY2Vzc0ZsYWdzAARuYW1lAApub3RQcmVzZW50AANvdXQAB3ByaW50bG4A" + + "BXNheUhpAAh0aHJvd2luZwAFdmFsdWUAIwAHDgAlAAcOeDwAAgMBFRgCAgQCDgQIDxcLAAABAQGA" + + "gATkAgIB/AIAABAAAAAAAAAAAQAAAAAAAAABAAAAFgAAAHAAAAACAAAACgAAAMgAAAADAAAAAgAA" + + "APAAAAAEAAAAAQAAAAgBAAAFAAAABQAAABABAAAGAAAAAQAAADgBAAADEAAAAQAAAFgBAAABIAAA" + + "AgAAAGQBAAAGIAAAAQAAAKQBAAABEAAAAQAAALQBAAACIAAAFgAAALoBAAADIAAAAgAAAP0CAAAE" + + "IAAAAgAAAAkDAAAAIAAAAQAAABgDAAAAEAAAAQAAACgDAAA="); + + public static void run() { + Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE); + doTest(new Transform()); + } + + public static void doTest(Transform t) { + t.sayHi(); + Redefinition.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES); + try { + t.sayHi(); + } catch (Throwable e) { + System.out.println("Caught exception " + e.getClass().getName() + ": " + e.getMessage()); + } + } +} diff --git a/test/1938-transform-abstract-single-impl/expected.txt b/test/1938-transform-abstract-single-impl/expected.txt new file mode 100644 index 0000000000..6a06f9baab --- /dev/null +++ b/test/1938-transform-abstract-single-impl/expected.txt @@ -0,0 +1,4 @@ +JNI_OnLoad called +Running sayHi() - hello +redefining TransformAbstract +Running sayHi() - Goodbye diff --git a/test/1938-transform-abstract-single-impl/info.txt b/test/1938-transform-abstract-single-impl/info.txt new file mode 100644 index 0000000000..5df8306981 --- /dev/null +++ b/test/1938-transform-abstract-single-impl/info.txt @@ -0,0 +1,2 @@ +Tests that single-implementation abstract methods don't crash the runtime when +their declaring class is redefined. 
diff --git a/test/1938-transform-abstract-single-impl/run b/test/1938-transform-abstract-single-impl/run new file mode 100755 index 0000000000..adb1a1c507 --- /dev/null +++ b/test/1938-transform-abstract-single-impl/run @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +./default-run "$@" --jvmti --no-app-image diff --git a/test/1938-transform-abstract-single-impl/src/Main.java b/test/1938-transform-abstract-single-impl/src/Main.java new file mode 100644 index 0000000000..7ac2172eb4 --- /dev/null +++ b/test/1938-transform-abstract-single-impl/src/Main.java @@ -0,0 +1,100 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import art.Redefinition; +import java.util.Base64; +public class Main { + static abstract class TransformAbstract { + public abstract void doSayHi(); + + public void sayHi() { + System.out.println("hello"); + } + } + + static final class TransformConcrete extends TransformAbstract { + public final void doSayHi() { + System.out.print("Running sayHi() - "); + sayHi(); + } + } + + public static native void ensureJitCompiled(Class k, String m); + + /** + * base64 encoded class/dex file for + * static abstract class TransformAbstract { + * public abstract void doSayHi(); + * public void sayHi() { + * System.out.println("Goodbye"); + * } + * } + */ + private static final byte[] CLASS_BYTES = Base64.getDecoder().decode( + "yv66vgAAADQAIQoABgAPCQAQABEIABIKABMAFAcAFgcAGQEABjxpbml0PgEAAygpVgEABENvZGUB" + + "AA9MaW5lTnVtYmVyVGFibGUBAAdkb1NheUhpAQAFc2F5SGkBAApTb3VyY2VGaWxlAQAJTWFpbi5q" + + "YXZhDAAHAAgHABoMABsAHAEAB0dvb2RieWUHAB0MAB4AHwcAIAEAFk1haW4kVHJhbnNmb3JtQWJz" + + "dHJhY3QBABFUcmFuc2Zvcm1BYnN0cmFjdAEADElubmVyQ2xhc3NlcwEAEGphdmEvbGFuZy9PYmpl" + + "Y3QBABBqYXZhL2xhbmcvU3lzdGVtAQADb3V0AQAVTGphdmEvaW8vUHJpbnRTdHJlYW07AQATamF2" + + "YS9pby9QcmludFN0cmVhbQEAB3ByaW50bG4BABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBAARNYWlu" + + "BCAABQAGAAAAAAADAAAABwAIAAEACQAAAB0AAQABAAAABSq3AAGxAAAAAQAKAAAABgABAAAAHAQB" + + "AAsACAAAAAEADAAIAAEACQAAACUAAgABAAAACbIAAhIDtgAEsQAAAAEACgAAAAoAAgAAAB8ACAAg" + + "AAIADQAAAAIADgAYAAAACgABAAUAFQAXBAg="); + private static final byte[] DEX_BYTES = Base64.getDecoder().decode( + "ZGV4CjAzNQCQkoTiKzIz0l96rtsnUxdY4Kwx+YINWFHEAwAAcAAAAHhWNBIAAAAAAAAAAAADAAAV" + + "AAAAcAAAAAkAAADEAAAAAgAAAOgAAAABAAAAAAEAAAUAAAAIAQAAAQAAADABAAB0AgAAUAEAAKoB" + + "AACyAQAAuwEAANUBAADdAQAAAQIAACECAAA4AgAATAIAAGACAAB0AgAAfwIAAJICAACVAgAAmQIA" + + "AKYCAACvAgAAtQIAALoCAADDAgAAygIAAAIAAAADAAAABAAAAAUAAAAGAAAABwAAAAgAAAAJAAAA" + + "DAAAAAwAAAAIAAAAAAAAAA0AAAAIAAAApAEAAAcABAARAAAAAAAAAAAAAAAAAAAADwAAAAAAAAAT" + + 
"AAAABAABABIAAAAFAAAAAAAAAAAAAAAABAAABQAAAAAAAAAKAAAAlAEAAOwCAAAAAAAAAgAAANwC" + + "AADiAgAAAQABAAEAAADRAgAABAAAAHAQBAAAAA4AAwABAAIAAADWAgAACAAAAGIAAAAaAQEAbiAD" + + "ABAADgBQAQAAAAAAAAAAAAAAAAAAAQAAAAYABjxpbml0PgAHR29vZGJ5ZQAYTE1haW4kVHJhbnNm" + + "b3JtQWJzdHJhY3Q7AAZMTWFpbjsAIkxkYWx2aWsvYW5ub3RhdGlvbi9FbmNsb3NpbmdDbGFzczsA" + + "HkxkYWx2aWsvYW5ub3RhdGlvbi9Jbm5lckNsYXNzOwAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABJM" + + "amF2YS9sYW5nL09iamVjdDsAEkxqYXZhL2xhbmcvU3RyaW5nOwASTGphdmEvbGFuZy9TeXN0ZW07" + + "AAlNYWluLmphdmEAEVRyYW5zZm9ybUFic3RyYWN0AAFWAAJWTAALYWNjZXNzRmxhZ3MAB2RvU2F5" + + "SGkABG5hbWUAA291dAAHcHJpbnRsbgAFc2F5SGkABXZhbHVlABwABw4AHwAHDngAAgIBFBgBAgMC" + + "DiQIBBAXCwAAAQIAgIAE3AIBgQgAAQH0AgAAEAAAAAAAAAABAAAAAAAAAAEAAAAVAAAAcAAAAAIA" + + "AAAJAAAAxAAAAAMAAAACAAAA6AAAAAQAAAABAAAAAAEAAAUAAAAFAAAACAEAAAYAAAABAAAAMAEA" + + "AAMQAAABAAAAUAEAAAEgAAACAAAAXAEAAAYgAAABAAAAlAEAAAEQAAABAAAApAEAAAIgAAAVAAAA" + + "qgEAAAMgAAACAAAA0QIAAAQgAAACAAAA3AIAAAAgAAABAAAA7AIAAAAQAAABAAAAAAMAAA=="); + + public static void main(String[] args) throws Exception { + System.loadLibrary(args[0]); + Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE); + + ensureJitCompiled(TransformAbstract.class, "sayHi"); + ensureJitCompiled(TransformConcrete.class, "doSayHi"); + + TransformAbstract t1 = new TransformConcrete(); + t1.doSayHi(); + + assertSingleImplementation(TransformAbstract.class, "doSayHi", true); + + System.out.println("redefining TransformAbstract"); + Redefinition.doCommonClassRedefinition(TransformAbstract.class, CLASS_BYTES, DEX_BYTES); + + t1.doSayHi(); + } + + private static native boolean hasSingleImplementation(Class<?> clazz, String method_name); + private static void assertSingleImplementation(Class<?> clazz, String method_name, boolean b) { + if (hasSingleImplementation(clazz, method_name) != b) { + System.out.println(clazz + "." 
+ method_name + + " doesn't have single implementation value of " + b); + } + } +} diff --git a/test/1938-transform-abstract-single-impl/src/art/Redefinition.java b/test/1938-transform-abstract-single-impl/src/art/Redefinition.java new file mode 100644 index 0000000000..56d2938a01 --- /dev/null +++ b/test/1938-transform-abstract-single-impl/src/art/Redefinition.java @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package art; + +import java.util.ArrayList; +// Common Redefinition functions. Placed here for use by CTS +public class Redefinition { + public static final class CommonClassDefinition { + public final Class<?> target; + public final byte[] class_file_bytes; + public final byte[] dex_file_bytes; + + public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) { + this.target = target; + this.class_file_bytes = class_file_bytes; + this.dex_file_bytes = dex_file_bytes; + } + } + + // A set of possible test configurations. Test should set this if they need to. 
+ // This must be kept in sync with the defines in ti-agent/common_helper.cc + public static enum Config { + COMMON_REDEFINE(0), + COMMON_RETRANSFORM(1), + COMMON_TRANSFORM(2); + + private final int val; + private Config(int val) { + this.val = val; + } + } + + public static void setTestConfiguration(Config type) { + nativeSetTestConfiguration(type.val); + } + + private static native void nativeSetTestConfiguration(int type); + + // Transforms the class + public static native void doCommonClassRedefinition(Class<?> target, + byte[] classfile, + byte[] dexfile); + + public static void doMultiClassRedefinition(CommonClassDefinition... defs) { + ArrayList<Class<?>> classes = new ArrayList<>(); + ArrayList<byte[]> class_files = new ArrayList<>(); + ArrayList<byte[]> dex_files = new ArrayList<>(); + + for (CommonClassDefinition d : defs) { + classes.add(d.target); + class_files.add(d.class_file_bytes); + dex_files.add(d.dex_file_bytes); + } + doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]), + class_files.toArray(new byte[0][]), + dex_files.toArray(new byte[0][])); + } + + public static void addMultiTransformationResults(CommonClassDefinition... defs) { + for (CommonClassDefinition d : defs) { + addCommonTransformationResult(d.target.getCanonicalName(), + d.class_file_bytes, + d.dex_file_bytes); + } + } + + public static native void doCommonMultiClassRedefinition(Class<?>[] targets, + byte[][] classfiles, + byte[][] dexfiles); + public static native void doCommonClassRetransformation(Class<?>... 
target); + public static native void setPopRetransformations(boolean pop); + public static native void popTransformationFor(String name); + public static native void enableCommonRetransformation(boolean enable); + public static native void addCommonTransformationResult(String target_name, + byte[] class_bytes, + byte[] dex_bytes); +} diff --git a/test/1939-proxy-frames/expected.txt b/test/1939-proxy-frames/expected.txt new file mode 100644 index 0000000000..a4c97c9bbe --- /dev/null +++ b/test/1939-proxy-frames/expected.txt @@ -0,0 +1,8 @@ +Running public abstract void art.Test1939$Foo.InterfaceProxyMethod(java.lang.Runnable) with "GetThis" on remote thread. +"GetThis" on public abstract void art.Test1939$Foo.InterfaceProxyMethod(java.lang.Runnable) got value: Proxy for [interface art.Test1939$Foo] +Running public abstract void art.Test1939$Foo.InterfaceProxyMethod(java.lang.Runnable) with "GetLocalReference0" on remote thread. +"GetLocalReference0" on public abstract void art.Test1939$Foo.InterfaceProxyMethod(java.lang.Runnable) failed due to JVMTI_ERROR_OPAQUE_FRAME +Running public abstract void art.Test1939$Foo.InterfaceProxyMethod(java.lang.Runnable) with "GetProxyFrameLocation" on remote thread. +"GetProxyFrameLocation" on public abstract void art.Test1939$Foo.InterfaceProxyMethod(java.lang.Runnable) got value: -1 +Running public abstract void art.Test1939$Foo.InterfaceProxyMethod(java.lang.Runnable) with "GetProxyFrameMethod" on remote thread. 
+"GetProxyFrameMethod" on public abstract void art.Test1939$Foo.InterfaceProxyMethod(java.lang.Runnable) got value: public final void $Proxy0.InterfaceProxyMethod(java.lang.Runnable) diff --git a/test/1939-proxy-frames/info.txt b/test/1939-proxy-frames/info.txt new file mode 100644 index 0000000000..9fc3d62cd6 --- /dev/null +++ b/test/1939-proxy-frames/info.txt @@ -0,0 +1,2 @@ +Test for jvmti get local instance + diff --git a/test/1939-proxy-frames/local_instance.cc b/test/1939-proxy-frames/local_instance.cc new file mode 100644 index 0000000000..dc833bfd90 --- /dev/null +++ b/test/1939-proxy-frames/local_instance.cc @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <iostream> +#include <pthread.h> +#include <stdio.h> +#include <vector> + +#include "android-base/logging.h" +#include "jni.h" +#include "scoped_local_ref.h" +#include "scoped_primitive_array.h" + +#include "jvmti.h" + +// Test infrastructure +#include "jvmti_helper.h" +#include "test_env.h" + +namespace art { +namespace Test1939ProxyFrames { + +extern "C" JNIEXPORT jobject Java_art_Test1939_GetFrameMethod(JNIEnv* env, + jclass, + jthread thr, + jint depth) { + jmethodID m = nullptr; + jlong loc = -1; + if (JvmtiErrorToException(env, jvmti_env, jvmti_env->GetFrameLocation(thr, depth, &m, &loc))) { + return nullptr; + } + jclass klass = nullptr; + if (JvmtiErrorToException(env, jvmti_env, jvmti_env->GetMethodDeclaringClass(m, &klass))) { + return nullptr; + } + jobject res = env->ToReflectedMethod(klass, m, false); + env->DeleteLocalRef(klass); + return res; +} + +extern "C" JNIEXPORT jlong Java_art_Test1939_GetFrameLocation(JNIEnv* env, + jclass, + jthread thr, + jint depth) { + jmethodID m = nullptr; + jlong loc = -1; + JvmtiErrorToException(env, jvmti_env, jvmti_env->GetFrameLocation(thr, depth, &m, &loc)); + return loc; +} + +} // namespace Test1939ProxyFrames +} // namespace art + diff --git a/test/1939-proxy-frames/run b/test/1939-proxy-frames/run new file mode 100755 index 0000000000..51875a7e86 --- /dev/null +++ b/test/1939-proxy-frames/run @@ -0,0 +1,18 @@ +#!/bin/bash +# +# Copyright 2017 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# Ask for stack traces to be dumped to a file rather than to stdout. +./default-run "$@" --jvmti diff --git a/test/1939-proxy-frames/src/Main.java b/test/1939-proxy-frames/src/Main.java new file mode 100644 index 0000000000..85cab34d53 --- /dev/null +++ b/test/1939-proxy-frames/src/Main.java @@ -0,0 +1,21 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public class Main { + public static void main(String[] args) throws Exception { + art.Test1939.run(); + } +} diff --git a/test/1939-proxy-frames/src/art/Breakpoint.java b/test/1939-proxy-frames/src/art/Breakpoint.java new file mode 100644 index 0000000000..bbb89f707f --- /dev/null +++ b/test/1939-proxy-frames/src/art/Breakpoint.java @@ -0,0 +1,202 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package art; + +import java.lang.reflect.Executable; +import java.util.HashSet; +import java.util.Set; +import java.util.Objects; + +public class Breakpoint { + public static class Manager { + public static class BP { + public final Executable method; + public final long location; + + public BP(Executable method) { + this(method, getStartLocation(method)); + } + + public BP(Executable method, long location) { + this.method = method; + this.location = location; + } + + @Override + public boolean equals(Object other) { + return (other instanceof BP) && + method.equals(((BP)other).method) && + location == ((BP)other).location; + } + + @Override + public String toString() { + return method.toString() + " @ " + getLine(); + } + + @Override + public int hashCode() { + return Objects.hash(method, location); + } + + public int getLine() { + try { + LineNumber[] lines = getLineNumberTable(method); + int best = -1; + for (LineNumber l : lines) { + if (l.location > location) { + break; + } else { + best = l.line; + } + } + return best; + } catch (Exception e) { + return -1; + } + } + } + + private Set<BP> breaks = new HashSet<>(); + + public void setBreakpoints(BP... bs) { + for (BP b : bs) { + if (breaks.add(b)) { + Breakpoint.setBreakpoint(b.method, b.location); + } + } + } + public void setBreakpoint(Executable method, long location) { + setBreakpoints(new BP(method, location)); + } + + public void clearBreakpoints(BP... 
bs) { + for (BP b : bs) { + if (breaks.remove(b)) { + Breakpoint.clearBreakpoint(b.method, b.location); + } + } + } + public void clearBreakpoint(Executable method, long location) { + clearBreakpoints(new BP(method, location)); + } + + public void clearAllBreakpoints() { + clearBreakpoints(breaks.toArray(new BP[0])); + } + } + + public static void startBreakpointWatch(Class<?> methodClass, + Executable breakpointReached, + Thread thr) { + startBreakpointWatch(methodClass, breakpointReached, false, thr); + } + + /** + * Enables the trapping of breakpoint events. + * + * If allowRecursive == true then breakpoints will be sent even if one is currently being handled. + */ + public static native void startBreakpointWatch(Class<?> methodClass, + Executable breakpointReached, + boolean allowRecursive, + Thread thr); + public static native void stopBreakpointWatch(Thread thr); + + public static final class LineNumber implements Comparable<LineNumber> { + public final long location; + public final int line; + + private LineNumber(long loc, int line) { + this.location = loc; + this.line = line; + } + + public boolean equals(Object other) { + return other instanceof LineNumber && ((LineNumber)other).line == line && + ((LineNumber)other).location == location; + } + + public int compareTo(LineNumber other) { + int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line)); + if (v != 0) { + return v; + } else { + return Long.valueOf(location).compareTo(Long.valueOf(other.location)); + } + } + } + + public static native void setBreakpoint(Executable m, long loc); + public static void setBreakpoint(Executable m, LineNumber l) { + setBreakpoint(m, l.location); + } + + public static native void clearBreakpoint(Executable m, long loc); + public static void clearBreakpoint(Executable m, LineNumber l) { + clearBreakpoint(m, l.location); + } + + private static native Object[] getLineNumberTableNative(Executable m); + public static LineNumber[] getLineNumberTable(Executable m) { + 
Object[] nativeTable = getLineNumberTableNative(m); + long[] location = (long[])(nativeTable[0]); + int[] lines = (int[])(nativeTable[1]); + if (lines.length != location.length) { + throw new Error("Lines and locations have different lengths!"); + } + LineNumber[] out = new LineNumber[lines.length]; + for (int i = 0; i < lines.length; i++) { + out[i] = new LineNumber(location[i], lines[i]); + } + return out; + } + + public static native long getStartLocation(Executable m); + + public static int locationToLine(Executable m, long location) { + try { + Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m); + int best = -1; + for (Breakpoint.LineNumber l : lines) { + if (l.location > location) { + break; + } else { + best = l.line; + } + } + return best; + } catch (Exception e) { + return -1; + } + } + + public static long lineToLocation(Executable m, int line) throws Exception { + try { + Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m); + for (Breakpoint.LineNumber l : lines) { + if (l.line == line) { + return l.location; + } + } + throw new Exception("Unable to find line " + line + " in " + m); + } catch (Exception e) { + throw new Exception("Unable to get line number info for " + m, e); + } + } +} + diff --git a/test/1939-proxy-frames/src/art/Locals.java b/test/1939-proxy-frames/src/art/Locals.java new file mode 100644 index 0000000000..22e21be398 --- /dev/null +++ b/test/1939-proxy-frames/src/art/Locals.java @@ -0,0 +1,121 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package art; + +import java.lang.reflect.Executable; +import java.util.Objects; + +public class Locals { + public static native void EnableLocalVariableAccess(); + + public static class VariableDescription { + public final long start_location; + public final int length; + public final String name; + public final String signature; + public final String generic_signature; + public final int slot; + + public VariableDescription( + long start, int length, String name, String sig, String gen_sig, int slot) { + this.start_location = start; + this.length = length; + this.name = name; + this.signature = sig; + this.generic_signature = gen_sig; + this.slot = slot; + } + + @Override + public String toString() { + return String.format( + "VariableDescription { " + + "Sig: '%s', Name: '%s', Gen_sig: '%s', slot: %d, start: %d, len: %d" + + "}", + this.signature, + this.name, + this.generic_signature, + this.slot, + this.start_location, + this.length); + } + public boolean equals(Object other) { + if (!(other instanceof VariableDescription)) { + return false; + } else { + VariableDescription v = (VariableDescription)other; + return Objects.equals(v.signature, signature) && + Objects.equals(v.name, name) && + Objects.equals(v.generic_signature, generic_signature) && + v.slot == slot && + v.start_location == start_location && + v.length == length; + } + } + public int hashCode() { + return Objects.hash(this.signature, this.name, this.generic_signature, this.slot, + this.start_location, this.length); + } + } + + public static native VariableDescription[] GetLocalVariableTable(Executable e); + + public static VariableDescription GetVariableAtLine( + Executable e, String name, String sig, int line) throws Exception { + return GetVariableAtLocation(e, name, sig, Breakpoint.lineToLocation(e, line)); + } + + public static VariableDescription GetVariableAtLocation( + 
Executable e, String name, String sig, long loc) { + VariableDescription[] vars = GetLocalVariableTable(e); + for (VariableDescription var : vars) { + if (var.start_location <= loc && + var.length + var.start_location > loc && + var.name.equals(name) && + var.signature.equals(sig)) { + return var; + } + } + throw new Error( + "Unable to find variable " + name + " (sig: " + sig + ") in " + e + " at loc " + loc); + } + + public static native int GetLocalVariableInt(Thread thr, int depth, int slot); + public static native long GetLocalVariableLong(Thread thr, int depth, int slot); + public static native float GetLocalVariableFloat(Thread thr, int depth, int slot); + public static native double GetLocalVariableDouble(Thread thr, int depth, int slot); + public static native Object GetLocalVariableObject(Thread thr, int depth, int slot); + public static native Object GetLocalInstance(Thread thr, int depth); + + public static void SetLocalVariableInt(Thread thr, int depth, int slot, Object val) { + SetLocalVariableInt(thr, depth, slot, ((Number)val).intValue()); + } + public static void SetLocalVariableLong(Thread thr, int depth, int slot, Object val) { + SetLocalVariableLong(thr, depth, slot, ((Number)val).longValue()); + } + public static void SetLocalVariableFloat(Thread thr, int depth, int slot, Object val) { + SetLocalVariableFloat(thr, depth, slot, ((Number)val).floatValue()); + } + public static void SetLocalVariableDouble(Thread thr, int depth, int slot, Object val) { + SetLocalVariableDouble(thr, depth, slot, ((Number)val).doubleValue()); + } + public static native void SetLocalVariableInt(Thread thr, int depth, int slot, int val); + public static native void SetLocalVariableLong(Thread thr, int depth, int slot, long val); + public static native void SetLocalVariableFloat(Thread thr, int depth, int slot, float val); + public static native void SetLocalVariableDouble(Thread thr, int depth, int slot, double val); + public static native void 
SetLocalVariableObject(Thread thr, int depth, int slot, Object val); +} diff --git a/test/1939-proxy-frames/src/art/StackTrace.java b/test/1939-proxy-frames/src/art/StackTrace.java new file mode 100644 index 0000000000..2ea2f201e8 --- /dev/null +++ b/test/1939-proxy-frames/src/art/StackTrace.java @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package art; + +import java.lang.reflect.Field; +import java.lang.reflect.Executable; + +public class StackTrace { + public static class StackFrameData { + public final Thread thr; + public final Executable method; + public final long current_location; + public final int depth; + + public StackFrameData(Thread thr, Executable e, long loc, int depth) { + this.thr = thr; + this.method = e; + this.current_location = loc; + this.depth = depth; + } + @Override + public String toString() { + return String.format( + "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }", + this.thr, + this.method, + this.current_location, + this.depth); + } + } + + public static native int GetStackDepth(Thread thr); + + private static native StackFrameData[] nativeGetStackTrace(Thread thr); + + public static StackFrameData[] GetStackTrace(Thread thr) { + // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not + // suspended. 
The spec says that not being suspended is fine but since we want this to be + // consistent we will suspend for the RI. + boolean suspend_thread = + !System.getProperty("java.vm.name").equals("Dalvik") && + !thr.equals(Thread.currentThread()) && + !Suspension.isSuspended(thr); + if (suspend_thread) { + Suspension.suspend(thr); + } + StackFrameData[] out = nativeGetStackTrace(thr); + if (suspend_thread) { + Suspension.resume(thr); + } + return out; + } +} + diff --git a/test/1939-proxy-frames/src/art/Suspension.java b/test/1939-proxy-frames/src/art/Suspension.java new file mode 100644 index 0000000000..16e62ccac9 --- /dev/null +++ b/test/1939-proxy-frames/src/art/Suspension.java @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package art; + +public class Suspension { + // Suspends a thread using jvmti. + public native static void suspend(Thread thr); + + // Resumes a thread using jvmti. + public native static void resume(Thread thr); + + public native static boolean isSuspended(Thread thr); + + public native static int[] suspendList(Thread... threads); + public native static int[] resumeList(Thread... 
threads); +} diff --git a/test/1939-proxy-frames/src/art/Test1939.java b/test/1939-proxy-frames/src/art/Test1939.java new file mode 100644 index 0000000000..83d0d2ca4b --- /dev/null +++ b/test/1939-proxy-frames/src/art/Test1939.java @@ -0,0 +1,175 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package art; + +import java.lang.reflect.Constructor; +import java.lang.reflect.Executable; +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.nio.ByteBuffer; +import java.util.concurrent.Semaphore; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Set; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.function.Supplier; +import java.util.function.Consumer; + +public class Test1939 { + public static interface SafepointFunction { + public void invoke( + Thread thread, + Method target, + int depth) throws Exception; + } + + public static interface GetterFunction { + public Object GetVar(Thread t, int depth); + } + + public static SafepointFunction NamedGet(final String type, final GetterFunction get) { + return new SafepointFunction() { + public void invoke(Thread t, Method method, int depth) { + try { + Object res = get.GetVar(t, depth); + System.out.println(this + " on " + method + " got value: " + res); + } catch (Exception e) { + 
System.out.println(this + " on " + method + " failed due to " + e.getMessage()); + } + } + public String toString() { + return "\"Get" + type + "\""; + } + }; + } + + public static class TestCase { + public final Object thiz; + public final Method target; + + public TestCase(Method target) { + this(null, target); + } + public TestCase(Object thiz, Method target) { + this.thiz = thiz; + this.target = target; + } + + public static class ThreadPauser implements Runnable { + public final Semaphore sem_wakeup_main; + public final Semaphore sem_wait; + + public ThreadPauser() { + sem_wakeup_main = new Semaphore(0); + sem_wait = new Semaphore(0); + } + + public void run() { + try { + sem_wakeup_main.release(); + sem_wait.acquire(); + } catch (Exception e) { + throw new Error("Error with semaphores!", e); + } + } + + public void waitForOtherThreadToPause() throws Exception { + sem_wakeup_main.acquire(); + } + + public void wakeupOtherThread() throws Exception { + sem_wait.release(); + } + } + + public void exec(final SafepointFunction safepoint) throws Exception { + System.out.println("Running " + target + " with " + safepoint + " on remote thread."); + final ThreadPauser pause = new ThreadPauser(); + Thread remote = new Thread( + () -> { + try { + target.invoke(thiz, pause); + } catch (Exception e) { + throw new Error("Error invoking remote thread " + Thread.currentThread(), e); + } + }, + "remote thread for " + target + " with " + safepoint); + remote.start(); + pause.waitForOtherThreadToPause(); + try { + Suspension.suspend(remote); + StackTrace.StackFrameData frame = findStackFrame(remote); + safepoint.invoke(remote, target, frame.depth); + } finally { + Suspension.resume(remote); + pause.wakeupOtherThread(); + remote.join(); + } + } + + private StackTrace.StackFrameData findStackFrame(Thread thr) { + for (StackTrace.StackFrameData frame : StackTrace.GetStackTrace(thr)) { + if (frame.method.equals(target) || + (frame.method.getName().equals(target.getName()) && + 
Arrays.deepEquals(frame.method.getParameterTypes(), target.getParameterTypes()) && + ((Method)frame.method).getReturnType().equals(target.getReturnType()))) { + return frame; + } + } + throw new Error("Unable to find stack frame in method " + target + " on thread " + thr); + } + } + + public static Method getMethod(Class<?> klass, String name) throws Exception { + return klass.getDeclaredMethod(name, Runnable.class); + } + + public static interface Foo { + public void InterfaceProxyMethod(Runnable r); + } + + public static Object getProxyObject(final Class... k) { + return Proxy.newProxyInstance( + Test1939.class.getClassLoader(), + k, + (p, m, a) -> { + if (m.getName().equals("toString")) { + return "Proxy for " + Arrays.toString(k); + } else { + ((Runnable)a[0]).run(); + return null; + } + }); + } + + public static void run() throws Exception { + Locals.EnableLocalVariableAccess(); + TestCase test = new TestCase( + getProxyObject(Foo.class), getMethod(Foo.class, "InterfaceProxyMethod")); + test.exec(NamedGet("This", Locals::GetLocalInstance)); + test.exec(NamedGet("LocalReference0", (t, d) -> Locals.GetLocalVariableObject(t, d, 0))); + test.exec(NamedGet("ProxyFrameLocation", (t, d) -> Long.valueOf(GetFrameLocation(t, d)))); + test.exec(NamedGet("ProxyFrameMethod", Test1939::GetFrameMethod)); + } + + public static native long GetFrameLocation(Thread thr, int depth); + public static native Executable GetFrameMethod(Thread thr, int depth); +} + diff --git a/test/450-checker-types/build b/test/450-checker-types/build index 947ec9a560..3721955670 100755 --- a/test/450-checker-types/build +++ b/test/450-checker-types/build @@ -21,6 +21,6 @@ export USE_JACK=false export DESUGAR=false # See b/65168732 -export DX=$ANDROID_HOST_OUT/bin/dx +export USE_D8=false ./default-build "$@" diff --git a/test/458-checker-instruct-simplification/build b/test/458-checker-instruct-simplification/build index 947ec9a560..3721955670 100755 --- 
a/test/458-checker-instruct-simplification/build +++ b/test/458-checker-instruct-simplification/build @@ -21,6 +21,6 @@ export USE_JACK=false export DESUGAR=false # See b/65168732 -export DX=$ANDROID_HOST_OUT/bin/dx +export USE_D8=false ./default-build "$@" diff --git a/test/463-checker-boolean-simplifier/build b/test/463-checker-boolean-simplifier/build index 947ec9a560..3721955670 100755 --- a/test/463-checker-boolean-simplifier/build +++ b/test/463-checker-boolean-simplifier/build @@ -21,6 +21,6 @@ export USE_JACK=false export DESUGAR=false # See b/65168732 -export DX=$ANDROID_HOST_OUT/bin/dx +export USE_D8=false ./default-build "$@" diff --git a/test/476-checker-ctor-fence-redun-elim/build b/test/476-checker-ctor-fence-redun-elim/build index 42b99ad9f8..10ffcc537d 100644 --- a/test/476-checker-ctor-fence-redun-elim/build +++ b/test/476-checker-ctor-fence-redun-elim/build @@ -15,6 +15,6 @@ # limitations under the License. # See b/65168732 -export DX=$ANDROID_HOST_OUT/bin/dx +export USE_D8=false ./default-build "$@" diff --git a/test/482-checker-loop-back-edge-use/build b/test/482-checker-loop-back-edge-use/build index 42b99ad9f8..10ffcc537d 100644 --- a/test/482-checker-loop-back-edge-use/build +++ b/test/482-checker-loop-back-edge-use/build @@ -15,6 +15,6 @@ # limitations under the License. # See b/65168732 -export DX=$ANDROID_HOST_OUT/bin/dx +export USE_D8=false ./default-build "$@" diff --git a/test/484-checker-register-hints/build b/test/484-checker-register-hints/build index 42b99ad9f8..10ffcc537d 100644 --- a/test/484-checker-register-hints/build +++ b/test/484-checker-register-hints/build @@ -15,6 +15,6 @@ # limitations under the License. # See b/65168732 -export DX=$ANDROID_HOST_OUT/bin/dx +export USE_D8=false ./default-build "$@" diff --git a/test/530-checker-lse/build b/test/530-checker-lse/build index 42b99ad9f8..10ffcc537d 100755 --- a/test/530-checker-lse/build +++ b/test/530-checker-lse/build @@ -15,6 +15,6 @@ # limitations under the License. 
# See b/65168732 -export DX=$ANDROID_HOST_OUT/bin/dx +export USE_D8=false ./default-build "$@" diff --git a/test/549-checker-types-merge/build b/test/549-checker-types-merge/build index 42b99ad9f8..10ffcc537d 100644 --- a/test/549-checker-types-merge/build +++ b/test/549-checker-types-merge/build @@ -15,6 +15,6 @@ # limitations under the License. # See b/65168732 -export DX=$ANDROID_HOST_OUT/bin/dx +export USE_D8=false ./default-build "$@" diff --git a/test/565-checker-doublenegbitwise/build b/test/565-checker-doublenegbitwise/build index 947ec9a560..3721955670 100755 --- a/test/565-checker-doublenegbitwise/build +++ b/test/565-checker-doublenegbitwise/build @@ -21,6 +21,6 @@ export USE_JACK=false export DESUGAR=false # See b/65168732 -export DX=$ANDROID_HOST_OUT/bin/dx +export USE_D8=false ./default-build "$@" diff --git a/test/565-checker-rotate/build b/test/565-checker-rotate/build index 42b99ad9f8..10ffcc537d 100644 --- a/test/565-checker-rotate/build +++ b/test/565-checker-rotate/build @@ -15,6 +15,6 @@ # limitations under the License. # See b/65168732 -export DX=$ANDROID_HOST_OUT/bin/dx +export USE_D8=false ./default-build "$@" diff --git a/test/566-checker-signum/build b/test/566-checker-signum/build index 42b99ad9f8..10ffcc537d 100644 --- a/test/566-checker-signum/build +++ b/test/566-checker-signum/build @@ -15,6 +15,6 @@ # limitations under the License. # See b/65168732 -export DX=$ANDROID_HOST_OUT/bin/dx +export USE_D8=false ./default-build "$@" diff --git a/test/567-checker-compare/build b/test/567-checker-compare/build index 1d269dcdb2..10ffcc537d 100644 --- a/test/567-checker-compare/build +++ b/test/567-checker-compare/build @@ -15,6 +15,6 @@ # limitations under the License. 
# See b/65168732 -export DX=$ANDROID_HOST_OUT/bin/dx +export USE_D8=false ./default-build "$@" diff --git a/test/570-checker-osr/build b/test/570-checker-osr/build index 42b99ad9f8..10ffcc537d 100644 --- a/test/570-checker-osr/build +++ b/test/570-checker-osr/build @@ -15,6 +15,6 @@ # limitations under the License. # See b/65168732 -export DX=$ANDROID_HOST_OUT/bin/dx +export USE_D8=false ./default-build "$@" diff --git a/test/586-checker-null-array-get/build b/test/586-checker-null-array-get/build index 947ec9a560..3721955670 100755 --- a/test/586-checker-null-array-get/build +++ b/test/586-checker-null-array-get/build @@ -21,6 +21,6 @@ export USE_JACK=false export DESUGAR=false # See b/65168732 -export DX=$ANDROID_HOST_OUT/bin/dx +export USE_D8=false ./default-build "$@" diff --git a/test/593-checker-boolean-2-integral-conv/build b/test/593-checker-boolean-2-integral-conv/build index 947ec9a560..3721955670 100755 --- a/test/593-checker-boolean-2-integral-conv/build +++ b/test/593-checker-boolean-2-integral-conv/build @@ -21,6 +21,6 @@ export USE_JACK=false export DESUGAR=false # See b/65168732 -export DX=$ANDROID_HOST_OUT/bin/dx +export USE_D8=false ./default-build "$@" diff --git a/test/597-deopt-invoke-stub/run b/test/597-deopt-invoke-stub/run index bc04498bfe..53b7c4cc71 100644 --- a/test/597-deopt-invoke-stub/run +++ b/test/597-deopt-invoke-stub/run @@ -14,5 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -# We want to run in debuggable mode and compiled. -exec ${RUN} --jit -Xcompiler-option --debuggable "${@}" +# In order to test deoptimizing at quick-to-interpreter bridge, +# we want to run in debuggable mode with jit compilation. +# We also bump up the jit threshold to 10 to make sure that the method +# that should be interpreted is not compiled. 
+exec ${RUN} --jit --runtime-option -Xjitthreshold:10000 -Xcompiler-option --debuggable "${@}" diff --git a/test/611-checker-simplify-if/build b/test/611-checker-simplify-if/build index 42b99ad9f8..10ffcc537d 100644 --- a/test/611-checker-simplify-if/build +++ b/test/611-checker-simplify-if/build @@ -15,6 +15,6 @@ # limitations under the License. # See b/65168732 -export DX=$ANDROID_HOST_OUT/bin/dx +export USE_D8=false ./default-build "$@" diff --git a/test/618-checker-induction/build b/test/618-checker-induction/build index 42b99ad9f8..10ffcc537d 100644 --- a/test/618-checker-induction/build +++ b/test/618-checker-induction/build @@ -15,6 +15,6 @@ # limitations under the License. # See b/65168732 -export DX=$ANDROID_HOST_OUT/bin/dx +export USE_D8=false ./default-build "$@" diff --git a/test/910-methods/build b/test/910-methods/build index 42b99ad9f8..10ffcc537d 100644 --- a/test/910-methods/build +++ b/test/910-methods/build @@ -15,6 +15,6 @@ # limitations under the License. # See b/65168732 -export DX=$ANDROID_HOST_OUT/bin/dx +export USE_D8=false ./default-build "$@" diff --git a/test/910-methods/check b/test/910-methods/check index f9552ada26..e6f7d7773f 100644 --- a/test/910-methods/check +++ b/test/910-methods/check @@ -19,7 +19,7 @@ if [[ "$USE_JACK" == true ]]; then patch -p0 expected.txt < expected_jack.diff fi -if [[ "$DX" == 'd8' ]]; then +if [[ "$USE_D8" == true ]]; then patch -p0 expected.txt < expected_d8.diff fi diff --git a/test/911-get-stack-trace/build b/test/911-get-stack-trace/build index 42b99ad9f8..10ffcc537d 100644 --- a/test/911-get-stack-trace/build +++ b/test/911-get-stack-trace/build @@ -15,6 +15,6 @@ # limitations under the License. # See b/65168732 -export DX=$ANDROID_HOST_OUT/bin/dx +export USE_D8=false ./default-build "$@" diff --git a/test/913-heaps/build b/test/913-heaps/build index 42b99ad9f8..10ffcc537d 100644 --- a/test/913-heaps/build +++ b/test/913-heaps/build @@ -15,6 +15,6 @@ # limitations under the License. 
# See b/65168732 -export DX=$ANDROID_HOST_OUT/bin/dx +export USE_D8=false ./default-build "$@" diff --git a/test/983-source-transform-verify/source_transform.cc b/test/983-source-transform-verify/source_transform.cc index 570ade364d..ef67acec98 100644 --- a/test/983-source-transform-verify/source_transform.cc +++ b/test/983-source-transform-verify/source_transform.cc @@ -29,6 +29,7 @@ #include "base/macros.h" #include "bytecode_utils.h" #include "dex_file.h" +#include "dex_file_loader.h" #include "dex_instruction.h" #include "jit/jit.h" #include "native_stack_dump.h" @@ -66,14 +67,14 @@ void JNICALL CheckDexFileHook(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED, return; } std::string error; - std::unique_ptr<const DexFile> dex(DexFile::Open(class_data, - class_data_len, - "fake_location.dex", - /*location_checksum*/ 0, - /*oat_dex_file*/ nullptr, - /*verify*/ true, - /*verify_checksum*/ true, - &error)); + std::unique_ptr<const DexFile> dex(DexFileLoader::Open(class_data, + class_data_len, + "fake_location.dex", + /*location_checksum*/ 0, + /*oat_dex_file*/ nullptr, + /*verify*/ true, + /*verify_checksum*/ true, + &error)); if (dex.get() == nullptr) { std::cout << "Failed to verify dex file for " << name << " because " << error << std::endl; return; diff --git a/test/Android.bp b/test/Android.bp index 31474d5107..b737345729 100644 --- a/test/Android.bp +++ b/test/Android.bp @@ -277,6 +277,7 @@ art_cc_defaults { "1930-monitor-info/monitor.cc", "1932-monitor-events-misc/monitor_misc.cc", "1934-jvmti-signal-thread/signal_threads.cc", + "1939-proxy-frames/local_instance.cc", ], shared_libs: [ "libbase", diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar index d37e6bc4f3..9e1afc8cb5 100755 --- a/test/etc/run-test-jar +++ b/test/etc/run-test-jar @@ -71,9 +71,9 @@ VDEX_FILTER="" PROFILE="n" RANDOM_PROFILE="n" # The normal dex2oat timeout. -DEX2OAT_TIMEOUT="60" +DEX2OAT_TIMEOUT="300" # 5 mins # The *hard* timeout where we really start trying to kill the dex2oat. 
-DEX2OAT_RT_TIMEOUT="90" +DEX2OAT_RT_TIMEOUT="360" # 6 mins # if "y", set -Xstacktracedir and inform the test of its location. When # this is set, stack trace dumps (from signal 3) will be written to a file diff --git a/test/knownfailures.json b/test/knownfailures.json index 229d618a2d..47b2f22a3c 100644 --- a/test/knownfailures.json +++ b/test/knownfailures.json @@ -1,5 +1,12 @@ [ { + "tests": [ "1939-proxy-frames", "1914-get-local-instance" ], + "description": ["Test 1939 & 1914 seems to consistently fail in gcstress on 64 bit with", + "a proxy this object having no associated class!"], + "variant": "gcstress", + "bug": "http://b/67679263" + }, + { "tests": "1934-jvmti-signal-thread", "description": ["Disables 1934-jvmti-signal-thread in tracing configurations"], "variant": "trace | stream", @@ -237,7 +244,7 @@ }, { "tests": "597-deopt-invoke-stub", - "variant": "interp-ac | interpreter | optimizing | trace | stream", + "variant": "speed-profile | interp-ac | interpreter | optimizing | trace | stream", "description": ["This test expects JIT compilation and no AOT for", "testing deoptimizing at quick-to-interpreter bridge."] }, diff --git a/test/testrunner/run_build_test_target.py b/test/testrunner/run_build_test_target.py index 49444d42cb..492b792239 100755 --- a/test/testrunner/run_build_test_target.py +++ b/test/testrunner/run_build_test_target.py @@ -96,7 +96,9 @@ if target.has_key('run-test'): run_test_command = [os.path.join(env.ANDROID_BUILD_TOP, 'art/test/testrunner/testrunner.py')] run_test_command += target.get('run-test', []) - run_test_command += ['-j', str(n_threads)] + # Let testrunner compute concurrency based on #cpus. 
+ # b/65822340 + # run_test_command += ['-j', str(n_threads)] run_test_command += ['-b'] run_test_command += ['--host'] run_test_command += ['--verbose'] diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py index 929fca6b5d..ca29d0a484 100755 --- a/test/testrunner/testrunner.py +++ b/test/testrunner/testrunner.py @@ -237,6 +237,7 @@ def setup_test_env(): n_thread = get_default_threads('target') else: n_thread = get_default_threads('host') + print_text("Concurrency: " + str(n_thread) + "\n") global semaphore semaphore = threading.Semaphore(n_thread) @@ -480,7 +481,7 @@ def run_test(command, test, test_variant, test_name): if test_passed: print_test_info(test_name, 'PASS') else: - failed_tests.append((test_name, script_output)) + failed_tests.append((test_name, str(command) + "\n" + script_output)) if not env.ART_TEST_KEEP_GOING: stop_testrunner = True print_test_info(test_name, 'FAIL', ('%s\n%s') % ( @@ -535,10 +536,17 @@ def print_test_info(test_name, result, failed_test_info=""): total_test_count) if result == 'FAIL' or result == 'TIMEOUT': - info += ('%s %s %s\n') % ( - progress_info, - test_name, - COLOR_ERROR + result + COLOR_NORMAL) + if not verbose: + info += ('%s %s %s\n') % ( + progress_info, + test_name, + COLOR_ERROR + result + COLOR_NORMAL) + else: + info += ('%s %s %s\n%s\n') % ( + progress_info, + test_name, + COLOR_ERROR + result + COLOR_NORMAL, + failed_test_info) else: result_text = '' if result == 'PASS': diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk index cf31e2e5bd..5eccba1327 100644 --- a/tools/ahat/Android.mk +++ b/tools/ahat/Android.mk @@ -20,9 +20,9 @@ include art/build/Android.common_path.mk # --- ahat.jar ---------------- include $(CLEAR_VARS) -LOCAL_SRC_FILES := $(call all-java-files-under, src) -LOCAL_JAR_MANIFEST := src/manifest.txt -LOCAL_JAVA_RESOURCE_FILES := $(LOCAL_PATH)/src/style.css +LOCAL_SRC_FILES := $(call all-java-files-under, src/main) +LOCAL_JAR_MANIFEST := etc/ahat.mf 
+LOCAL_JAVA_RESOURCE_FILES := $(LOCAL_PATH)/etc/style.css LOCAL_IS_HOST_MODULE := true LOCAL_MODULE_TAGS := optional LOCAL_MODULE := ahat @@ -49,9 +49,9 @@ ifneq ($(EMMA_INSTRUMENT),true) include $(CLEAR_VARS) LOCAL_MODULE := ahat-test-dump LOCAL_MODULE_TAGS := tests -LOCAL_SRC_FILES := $(call all-java-files-under, test-dump) +LOCAL_SRC_FILES := $(call all-java-files-under, src/test-dump) LOCAL_PROGUARD_ENABLED := obfuscation -LOCAL_PROGUARD_FLAG_FILES := test-dump/config.pro +LOCAL_PROGUARD_FLAG_FILES := etc/test-dump.pro include $(BUILD_JAVA_LIBRARY) # Determine the location of the test-dump.jar, test-dump.hprof, and proguard @@ -87,15 +87,15 @@ $(AHAT_TEST_DUMP_BASE_HPROF): $(AHAT_TEST_DUMP_JAR) $(AHAT_TEST_DUMP_DEPENDENCIE # --- ahat-tests.jar -------------- include $(CLEAR_VARS) -LOCAL_SRC_FILES := $(call all-java-files-under, test) -LOCAL_JAR_MANIFEST := test/manifest.txt +LOCAL_SRC_FILES := $(call all-java-files-under, src/test) +LOCAL_JAR_MANIFEST := etc/ahat-tests.mf LOCAL_JAVA_RESOURCE_FILES := \ $(AHAT_TEST_DUMP_HPROF) \ $(AHAT_TEST_DUMP_BASE_HPROF) \ $(AHAT_TEST_DUMP_PROGUARD_MAP) \ - $(LOCAL_PATH)/test-dump/L.hprof \ - $(LOCAL_PATH)/test-dump/O.hprof \ - $(LOCAL_PATH)/test-dump/RI.hprof + $(LOCAL_PATH)/etc/L.hprof \ + $(LOCAL_PATH)/etc/O.hprof \ + $(LOCAL_PATH)/etc/RI.hprof LOCAL_STATIC_JAVA_LIBRARIES := ahat junit-host LOCAL_IS_HOST_MODULE := true LOCAL_MODULE_TAGS := tests diff --git a/tools/ahat/test-dump/L.hprof b/tools/ahat/etc/L.hprof Binary files differindex 1acdf7965d..1acdf7965d 100644 --- a/tools/ahat/test-dump/L.hprof +++ b/tools/ahat/etc/L.hprof diff --git a/tools/ahat/test-dump/O.hprof b/tools/ahat/etc/O.hprof Binary files differindex d474c6c6b4..d474c6c6b4 100644 --- a/tools/ahat/test-dump/O.hprof +++ b/tools/ahat/etc/O.hprof diff --git a/tools/ahat/etc/README.txt b/tools/ahat/etc/README.txt new file mode 100644 index 0000000000..e9b5b22dae --- /dev/null +++ b/tools/ahat/etc/README.txt @@ -0,0 +1,9 @@ +L.hprof + A version of the test-dump 
hprof generated on Android L, with one of the + ROOT_DEBUGGER records manually changed to a ROOT_FINALIZING record. + +O.hprof + A version of the test-dump hprof generated on Android O. + +RI.hprof + A version of the test-dump hprof generated on the reference implementation. diff --git a/tools/ahat/test-dump/RI.hprof b/tools/ahat/etc/RI.hprof Binary files differindex 9482542a7f..9482542a7f 100644 --- a/tools/ahat/test-dump/RI.hprof +++ b/tools/ahat/etc/RI.hprof diff --git a/tools/ahat/test/manifest.txt b/tools/ahat/etc/ahat-tests.mf index af17fadded..af17fadded 100644 --- a/tools/ahat/test/manifest.txt +++ b/tools/ahat/etc/ahat-tests.mf diff --git a/tools/ahat/src/manifest.txt b/tools/ahat/etc/ahat.mf index 1753406e6e..1753406e6e 100644 --- a/tools/ahat/src/manifest.txt +++ b/tools/ahat/etc/ahat.mf diff --git a/tools/ahat/src/style.css b/tools/ahat/etc/style.css index 47fae1d551..47fae1d551 100644 --- a/tools/ahat/src/style.css +++ b/tools/ahat/etc/style.css diff --git a/tools/ahat/test-dump/config.pro b/tools/ahat/etc/test-dump.pro index 284e4b8621..284e4b8621 100644 --- a/tools/ahat/test-dump/config.pro +++ b/tools/ahat/etc/test-dump.pro diff --git a/tools/ahat/src/AhatHandler.java b/tools/ahat/src/main/com/android/ahat/AhatHandler.java index d4b4d1b107..d4b4d1b107 100644 --- a/tools/ahat/src/AhatHandler.java +++ b/tools/ahat/src/main/com/android/ahat/AhatHandler.java diff --git a/tools/ahat/src/AhatHttpHandler.java b/tools/ahat/src/main/com/android/ahat/AhatHttpHandler.java index 1d05a66653..1d05a66653 100644 --- a/tools/ahat/src/AhatHttpHandler.java +++ b/tools/ahat/src/main/com/android/ahat/AhatHttpHandler.java diff --git a/tools/ahat/src/BitmapHandler.java b/tools/ahat/src/main/com/android/ahat/BitmapHandler.java index 836aef67b8..836aef67b8 100644 --- a/tools/ahat/src/BitmapHandler.java +++ b/tools/ahat/src/main/com/android/ahat/BitmapHandler.java diff --git a/tools/ahat/src/Column.java b/tools/ahat/src/main/com/android/ahat/Column.java index 
819e586ef9..819e586ef9 100644 --- a/tools/ahat/src/Column.java +++ b/tools/ahat/src/main/com/android/ahat/Column.java diff --git a/tools/ahat/src/Doc.java b/tools/ahat/src/main/com/android/ahat/Doc.java index 5a70c4c74b..5a70c4c74b 100644 --- a/tools/ahat/src/Doc.java +++ b/tools/ahat/src/main/com/android/ahat/Doc.java diff --git a/tools/ahat/src/DocString.java b/tools/ahat/src/main/com/android/ahat/DocString.java index 76e9e80d46..76e9e80d46 100644 --- a/tools/ahat/src/DocString.java +++ b/tools/ahat/src/main/com/android/ahat/DocString.java diff --git a/tools/ahat/src/DominatedList.java b/tools/ahat/src/main/com/android/ahat/DominatedList.java index 75133b2184..75133b2184 100644 --- a/tools/ahat/src/DominatedList.java +++ b/tools/ahat/src/main/com/android/ahat/DominatedList.java diff --git a/tools/ahat/src/HeapTable.java b/tools/ahat/src/main/com/android/ahat/HeapTable.java index b04f2aebf7..b04f2aebf7 100644 --- a/tools/ahat/src/HeapTable.java +++ b/tools/ahat/src/main/com/android/ahat/HeapTable.java diff --git a/tools/ahat/src/HtmlDoc.java b/tools/ahat/src/main/com/android/ahat/HtmlDoc.java index 5a22fc75fe..5a22fc75fe 100644 --- a/tools/ahat/src/HtmlDoc.java +++ b/tools/ahat/src/main/com/android/ahat/HtmlDoc.java diff --git a/tools/ahat/src/HtmlEscaper.java b/tools/ahat/src/main/com/android/ahat/HtmlEscaper.java index 75a68277d3..75a68277d3 100644 --- a/tools/ahat/src/HtmlEscaper.java +++ b/tools/ahat/src/main/com/android/ahat/HtmlEscaper.java diff --git a/tools/ahat/src/Main.java b/tools/ahat/src/main/com/android/ahat/Main.java index 31c485d851..31c485d851 100644 --- a/tools/ahat/src/Main.java +++ b/tools/ahat/src/main/com/android/ahat/Main.java diff --git a/tools/ahat/src/Menu.java b/tools/ahat/src/main/com/android/ahat/Menu.java index 6d38dc5731..6d38dc5731 100644 --- a/tools/ahat/src/Menu.java +++ b/tools/ahat/src/main/com/android/ahat/Menu.java diff --git a/tools/ahat/src/ObjectHandler.java b/tools/ahat/src/main/com/android/ahat/ObjectHandler.java index 
79f8b76c92..bfd5d5cacd 100644 --- a/tools/ahat/src/ObjectHandler.java +++ b/tools/ahat/src/main/com/android/ahat/ObjectHandler.java @@ -19,6 +19,7 @@ package com.android.ahat; import com.android.ahat.heapdump.AhatArrayInstance; import com.android.ahat.heapdump.AhatClassInstance; import com.android.ahat.heapdump.AhatClassObj; +import com.android.ahat.heapdump.AhatHeap; import com.android.ahat.heapdump.AhatInstance; import com.android.ahat.heapdump.AhatSnapshot; import com.android.ahat.heapdump.DiffFields; @@ -66,7 +67,10 @@ class ObjectHandler implements AhatHandler { doc.big(Summarizer.summarize(inst)); printAllocationSite(doc, query, inst); - printGcRootPath(doc, query, inst); + + if (!inst.isUnreachable()) { + printGcRootPath(doc, query, inst); + } doc.section("Object Info"); AhatClassObj cls = inst.getClassObj(); @@ -257,23 +261,54 @@ class ObjectHandler implements AhatHandler { if (bitmap != null) { doc.section("Bitmap Image"); doc.println(DocString.image( - DocString.formattedUri("bitmap?id=%d", bitmap.getId()), "bitmap image")); + DocString.formattedUri("bitmap?id=0x%x", bitmap.getId()), "bitmap image")); } } private void printGcRootPath(Doc doc, Query query, AhatInstance inst) { doc.section("Sample Path from GC Root"); List<PathElement> path = inst.getPathFromGcRoot(); - doc.table(new Column(""), new Column("Path Element")); - doc.row(DocString.text("(rooted)"), - DocString.link(DocString.uri("root"), DocString.text("ROOT"))); - for (PathElement element : path) { - DocString label = DocString.text("→ "); - label.append(Summarizer.summarize(element.instance)); - label.append(element.field); - doc.row(DocString.text(element.isDominator ? "(dominator)" : ""), label); + + // Add a dummy PathElement as a marker for the root. 
+ final PathElement root = new PathElement(null, null); + path.add(0, root); + + HeapTable.TableConfig<PathElement> table = new HeapTable.TableConfig<PathElement>() { + public String getHeapsDescription() { + return "Bytes Retained by Heap (Dominators Only)"; + } + + public long getSize(PathElement element, AhatHeap heap) { + if (element == root) { + return heap.getSize().getSize(); + } + if (element.isDominator) { + return element.instance.getRetainedSize(heap).getSize(); + } + return 0; + } + + public List<HeapTable.ValueConfig<PathElement>> getValueConfigs() { + HeapTable.ValueConfig<PathElement> value = new HeapTable.ValueConfig<PathElement>() { + public String getDescription() { + return "Path Element"; + } + + public DocString render(PathElement element) { + if (element == root) { + return DocString.link(DocString.uri("rooted"), DocString.text("ROOT")); + } else { + DocString label = DocString.text("→ "); + label.append(Summarizer.summarize(element.instance)); + label.append(element.field); + return label; + } + } + }; + return Collections.singletonList(value); + } }; - doc.end(); + HeapTable.render(doc, query, DOMINATOR_PATH_ID, table, mSnapshot, path); } public void printDominatedObjects(Doc doc, Query query, AhatInstance inst) { diff --git a/tools/ahat/src/ObjectsHandler.java b/tools/ahat/src/main/com/android/ahat/ObjectsHandler.java index 1a8f018bd5..1a8f018bd5 100644 --- a/tools/ahat/src/ObjectsHandler.java +++ b/tools/ahat/src/main/com/android/ahat/ObjectsHandler.java diff --git a/tools/ahat/src/OverviewHandler.java b/tools/ahat/src/main/com/android/ahat/OverviewHandler.java index c9f84259a9..c9f84259a9 100644 --- a/tools/ahat/src/OverviewHandler.java +++ b/tools/ahat/src/main/com/android/ahat/OverviewHandler.java diff --git a/tools/ahat/src/Query.java b/tools/ahat/src/main/com/android/ahat/Query.java index f910608771..9c2783c081 100644 --- a/tools/ahat/src/Query.java +++ b/tools/ahat/src/main/com/android/ahat/Query.java @@ -65,7 +65,7 @@ class Query { 
*/ public long getLong(String name, long defaultValue) { String value = get(name, null); - return value == null ? defaultValue : Long.parseLong(value); + return value == null ? defaultValue : Long.decode(value); } /** @@ -73,7 +73,7 @@ class Query { */ public int getInt(String name, int defaultValue) { String value = get(name, null); - return value == null ? defaultValue : Integer.parseInt(value); + return value == null ? defaultValue : Integer.decode(value); } /** diff --git a/tools/ahat/src/RootedHandler.java b/tools/ahat/src/main/com/android/ahat/RootedHandler.java index 26451a3963..26451a3963 100644 --- a/tools/ahat/src/RootedHandler.java +++ b/tools/ahat/src/main/com/android/ahat/RootedHandler.java diff --git a/tools/ahat/src/SiteHandler.java b/tools/ahat/src/main/com/android/ahat/SiteHandler.java index 543eaa376a..543eaa376a 100644 --- a/tools/ahat/src/SiteHandler.java +++ b/tools/ahat/src/main/com/android/ahat/SiteHandler.java diff --git a/tools/ahat/src/SitePrinter.java b/tools/ahat/src/main/com/android/ahat/SitePrinter.java index 32037f4414..32037f4414 100644 --- a/tools/ahat/src/SitePrinter.java +++ b/tools/ahat/src/main/com/android/ahat/SitePrinter.java diff --git a/tools/ahat/src/SizeTable.java b/tools/ahat/src/main/com/android/ahat/SizeTable.java index 46e395669f..46e395669f 100644 --- a/tools/ahat/src/SizeTable.java +++ b/tools/ahat/src/main/com/android/ahat/SizeTable.java diff --git a/tools/ahat/src/StaticHandler.java b/tools/ahat/src/main/com/android/ahat/StaticHandler.java index 4a68f1c12f..4a68f1c12f 100644 --- a/tools/ahat/src/StaticHandler.java +++ b/tools/ahat/src/main/com/android/ahat/StaticHandler.java diff --git a/tools/ahat/src/SubsetSelector.java b/tools/ahat/src/main/com/android/ahat/SubsetSelector.java index 79399c178b..79399c178b 100644 --- a/tools/ahat/src/SubsetSelector.java +++ b/tools/ahat/src/main/com/android/ahat/SubsetSelector.java diff --git a/tools/ahat/src/Summarizer.java b/tools/ahat/src/main/com/android/ahat/Summarizer.java 
index 50b2e4b3b3..ae0776ab0b 100644 --- a/tools/ahat/src/Summarizer.java +++ b/tools/ahat/src/main/com/android/ahat/Summarizer.java @@ -51,7 +51,9 @@ class Summarizer { } // Annotate unreachable objects as such. - if (!inst.isReachable()) { + if (inst.isWeaklyReachable()) { + formatted.append("weak "); + } else if (inst.isUnreachable()) { formatted.append("unreachable "); } @@ -65,7 +67,7 @@ class Summarizer { // Don't make links to placeholder objects. formatted.append(linkText); } else { - URI objTarget = DocString.formattedUri("object?id=%d", inst.getId()); + URI objTarget = DocString.formattedUri("object?id=0x%x", inst.getId()); formatted.appendLink(objTarget, linkText); } @@ -100,7 +102,7 @@ class Summarizer { AhatInstance bitmap = inst.getAssociatedBitmapInstance(); String thumbnail = ""; if (bitmap != null) { - URI uri = DocString.formattedUri("bitmap?id=%d", bitmap.getId()); + URI uri = DocString.formattedUri("bitmap?id=0x%x", bitmap.getId()); formatted.appendThumbnail(uri, "bitmap image"); } return formatted; diff --git a/tools/ahat/src/dominators/DominatorsComputation.java b/tools/ahat/src/main/com/android/ahat/dominators/DominatorsComputation.java index 58b7b59f9a..58b7b59f9a 100644 --- a/tools/ahat/src/dominators/DominatorsComputation.java +++ b/tools/ahat/src/main/com/android/ahat/dominators/DominatorsComputation.java diff --git a/tools/ahat/src/heapdump/AhatArrayInstance.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatArrayInstance.java index 50a4805bed..50a4805bed 100644 --- a/tools/ahat/src/heapdump/AhatArrayInstance.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatArrayInstance.java diff --git a/tools/ahat/src/heapdump/AhatClassInstance.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassInstance.java index 94efa5049f..94efa5049f 100644 --- a/tools/ahat/src/heapdump/AhatClassInstance.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassInstance.java diff --git a/tools/ahat/src/heapdump/AhatClassObj.java 
b/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassObj.java index be0f71306e..be0f71306e 100644 --- a/tools/ahat/src/heapdump/AhatClassObj.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassObj.java diff --git a/tools/ahat/src/heapdump/AhatField.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatField.java index a25ee2869d..a25ee2869d 100644 --- a/tools/ahat/src/heapdump/AhatField.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatField.java diff --git a/tools/ahat/src/heapdump/AhatHeap.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatHeap.java index b8897a182c..b8897a182c 100644 --- a/tools/ahat/src/heapdump/AhatHeap.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatHeap.java diff --git a/tools/ahat/src/heapdump/AhatInstance.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java index c04448728f..cb2d738f23 100644 --- a/tools/ahat/src/heapdump/AhatInstance.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java @@ -136,13 +136,28 @@ public abstract class AhatInstance implements Diffable<AhatInstance>, } /** - * Returns whether this object is strongly-reachable. + * Returns true if this object is strongly-reachable. */ - public boolean isReachable() { + public boolean isStronglyReachable() { return mImmediateDominator != null; } /** + * Returns true if this object is reachable only through a + * soft/weak/phantom/finalizer reference. + */ + public boolean isWeaklyReachable() { + return !isStronglyReachable() && mNextInstanceToGcRoot != null; + } + + /** + * Returns true if this object is completely unreachable. + */ + public boolean isUnreachable() { + return !isStronglyReachable() && !isWeaklyReachable(); + } + + /** * Returns the heap that this instance is allocated on. 
*/ public AhatHeap getHeap() { @@ -499,6 +514,10 @@ public abstract class AhatInstance implements Diffable<AhatInstance>, } else { if (ref.ref.mSoftReverseReferences == null) { ref.ref.mSoftReverseReferences = new ArrayList<AhatInstance>(); + if (ref.ref.mNextInstanceToGcRoot == null) { + ref.ref.mNextInstanceToGcRoot = ref.src; + ref.ref.mNextInstanceToGcRootField = ref.field; + } } ref.ref.mSoftReverseReferences.add(ref.src); } diff --git a/tools/ahat/src/heapdump/AhatPlaceHolderClassObj.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatPlaceHolderClassObj.java index 07f5b50012..07f5b50012 100644 --- a/tools/ahat/src/heapdump/AhatPlaceHolderClassObj.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatPlaceHolderClassObj.java diff --git a/tools/ahat/src/heapdump/AhatPlaceHolderInstance.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatPlaceHolderInstance.java index 884940370d..884940370d 100644 --- a/tools/ahat/src/heapdump/AhatPlaceHolderInstance.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatPlaceHolderInstance.java diff --git a/tools/ahat/src/heapdump/AhatSnapshot.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatSnapshot.java index 945966cec7..945966cec7 100644 --- a/tools/ahat/src/heapdump/AhatSnapshot.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatSnapshot.java diff --git a/tools/ahat/src/heapdump/Diff.java b/tools/ahat/src/main/com/android/ahat/heapdump/Diff.java index 98c7e58d56..98c7e58d56 100644 --- a/tools/ahat/src/heapdump/Diff.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/Diff.java diff --git a/tools/ahat/src/heapdump/DiffFields.java b/tools/ahat/src/main/com/android/ahat/heapdump/DiffFields.java index e3c671fe21..e3c671fe21 100644 --- a/tools/ahat/src/heapdump/DiffFields.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/DiffFields.java diff --git a/tools/ahat/src/heapdump/Diffable.java b/tools/ahat/src/main/com/android/ahat/heapdump/Diffable.java index 53442c857e..53442c857e 
100644 --- a/tools/ahat/src/heapdump/Diffable.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/Diffable.java diff --git a/tools/ahat/src/heapdump/DiffedFieldValue.java b/tools/ahat/src/main/com/android/ahat/heapdump/DiffedFieldValue.java index 3cd273ed98..3cd273ed98 100644 --- a/tools/ahat/src/heapdump/DiffedFieldValue.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/DiffedFieldValue.java diff --git a/tools/ahat/src/heapdump/DominatorReferenceIterator.java b/tools/ahat/src/main/com/android/ahat/heapdump/DominatorReferenceIterator.java index 0b99e496cc..0b99e496cc 100644 --- a/tools/ahat/src/heapdump/DominatorReferenceIterator.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/DominatorReferenceIterator.java diff --git a/tools/ahat/src/heapdump/Field.java b/tools/ahat/src/main/com/android/ahat/heapdump/Field.java index dff401796a..dff401796a 100644 --- a/tools/ahat/src/heapdump/Field.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/Field.java diff --git a/tools/ahat/src/heapdump/FieldValue.java b/tools/ahat/src/main/com/android/ahat/heapdump/FieldValue.java index 20e6da7271..20e6da7271 100644 --- a/tools/ahat/src/heapdump/FieldValue.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/FieldValue.java diff --git a/tools/ahat/src/heapdump/HprofFormatException.java b/tools/ahat/src/main/com/android/ahat/heapdump/HprofFormatException.java index 0e128cd50a..0e128cd50a 100644 --- a/tools/ahat/src/heapdump/HprofFormatException.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/HprofFormatException.java diff --git a/tools/ahat/src/heapdump/Instances.java b/tools/ahat/src/main/com/android/ahat/heapdump/Instances.java index 085144650f..085144650f 100644 --- a/tools/ahat/src/heapdump/Instances.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/Instances.java diff --git a/tools/ahat/src/heapdump/Parser.java b/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java index 756b7d2554..756b7d2554 100644 --- 
a/tools/ahat/src/heapdump/Parser.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java diff --git a/tools/ahat/src/heapdump/PathElement.java b/tools/ahat/src/main/com/android/ahat/heapdump/PathElement.java index 196a24628c..196a24628c 100644 --- a/tools/ahat/src/heapdump/PathElement.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/PathElement.java diff --git a/tools/ahat/src/heapdump/Reference.java b/tools/ahat/src/main/com/android/ahat/heapdump/Reference.java index 980f2780b6..980f2780b6 100644 --- a/tools/ahat/src/heapdump/Reference.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/Reference.java diff --git a/tools/ahat/src/heapdump/RootType.java b/tools/ahat/src/main/com/android/ahat/heapdump/RootType.java index af552ea2c9..af552ea2c9 100644 --- a/tools/ahat/src/heapdump/RootType.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/RootType.java diff --git a/tools/ahat/src/heapdump/Site.java b/tools/ahat/src/main/com/android/ahat/heapdump/Site.java index 821493f1be..523550ad2c 100644 --- a/tools/ahat/src/heapdump/Site.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/Site.java @@ -186,7 +186,7 @@ public class Site implements Diffable<Site> { // Add all reachable objects allocated at this site. 
for (AhatInstance inst : mObjects) { - if (inst.isReachable()) { + if (inst.isStronglyReachable()) { AhatHeap heap = inst.getHeap(); Size size = inst.getSize(); ObjectsInfo info = getObjectsInfo(heap, inst.getClassObj()); diff --git a/tools/ahat/src/heapdump/Size.java b/tools/ahat/src/main/com/android/ahat/heapdump/Size.java index 7c8db900df..7c8db900df 100644 --- a/tools/ahat/src/heapdump/Size.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/Size.java diff --git a/tools/ahat/src/heapdump/SkipNullsIterator.java b/tools/ahat/src/main/com/android/ahat/heapdump/SkipNullsIterator.java index e99fe5e8ea..e99fe5e8ea 100644 --- a/tools/ahat/src/heapdump/SkipNullsIterator.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/SkipNullsIterator.java diff --git a/tools/ahat/src/heapdump/Sort.java b/tools/ahat/src/main/com/android/ahat/heapdump/Sort.java index efe0d6b59b..efe0d6b59b 100644 --- a/tools/ahat/src/heapdump/Sort.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/Sort.java diff --git a/tools/ahat/src/heapdump/SuperRoot.java b/tools/ahat/src/main/com/android/ahat/heapdump/SuperRoot.java index a2adbd2808..a2adbd2808 100644 --- a/tools/ahat/src/heapdump/SuperRoot.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/SuperRoot.java diff --git a/tools/ahat/src/heapdump/Type.java b/tools/ahat/src/main/com/android/ahat/heapdump/Type.java index 726bc47cf2..726bc47cf2 100644 --- a/tools/ahat/src/heapdump/Type.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/Type.java diff --git a/tools/ahat/src/heapdump/Value.java b/tools/ahat/src/main/com/android/ahat/heapdump/Value.java index 01fd25057d..01fd25057d 100644 --- a/tools/ahat/src/heapdump/Value.java +++ b/tools/ahat/src/main/com/android/ahat/heapdump/Value.java diff --git a/tools/ahat/src/proguard/ProguardMap.java b/tools/ahat/src/main/com/android/ahat/proguard/ProguardMap.java index 50c110aad4..50c110aad4 100644 --- a/tools/ahat/src/proguard/ProguardMap.java +++ 
b/tools/ahat/src/main/com/android/ahat/proguard/ProguardMap.java diff --git a/tools/ahat/test-dump/Main.java b/tools/ahat/src/test-dump/Main.java index 333d28c214..333d28c214 100644 --- a/tools/ahat/test-dump/Main.java +++ b/tools/ahat/src/test-dump/Main.java diff --git a/tools/ahat/test/DiffFieldsTest.java b/tools/ahat/src/test/com/android/ahat/DiffFieldsTest.java index 19399757a6..19399757a6 100644 --- a/tools/ahat/test/DiffFieldsTest.java +++ b/tools/ahat/src/test/com/android/ahat/DiffFieldsTest.java diff --git a/tools/ahat/test/DiffTest.java b/tools/ahat/src/test/com/android/ahat/DiffTest.java index 585f29ae61..585f29ae61 100644 --- a/tools/ahat/test/DiffTest.java +++ b/tools/ahat/src/test/com/android/ahat/DiffTest.java diff --git a/tools/ahat/test/DominatorsTest.java b/tools/ahat/src/test/com/android/ahat/DominatorsTest.java index 0424e10dc8..0424e10dc8 100644 --- a/tools/ahat/test/DominatorsTest.java +++ b/tools/ahat/src/test/com/android/ahat/DominatorsTest.java diff --git a/tools/ahat/test/HtmlEscaperTest.java b/tools/ahat/src/test/com/android/ahat/HtmlEscaperTest.java index a36db356f5..a36db356f5 100644 --- a/tools/ahat/test/HtmlEscaperTest.java +++ b/tools/ahat/src/test/com/android/ahat/HtmlEscaperTest.java diff --git a/tools/ahat/test/InstanceTest.java b/tools/ahat/src/test/com/android/ahat/InstanceTest.java index 49a21e2d70..a4908fd0ab 100644 --- a/tools/ahat/test/InstanceTest.java +++ b/tools/ahat/src/test/com/android/ahat/InstanceTest.java @@ -214,7 +214,9 @@ public class InstanceTest { // reference as having a non-null referent. 
TestDump dump = TestDump.getTestDump(); AhatInstance ref = dump.getDumpedAhatInstance("aSoftReference"); - assertNotNull(ref.getReferent()); + AhatInstance referent = ref.getReferent(); + assertNotNull(referent); + assertTrue(referent.isWeaklyReachable()); } @Test diff --git a/tools/ahat/test/NativeAllocationTest.java b/tools/ahat/src/test/com/android/ahat/NativeAllocationTest.java index 7436be8311..7436be8311 100644 --- a/tools/ahat/test/NativeAllocationTest.java +++ b/tools/ahat/src/test/com/android/ahat/NativeAllocationTest.java diff --git a/tools/ahat/test/ObjectHandlerTest.java b/tools/ahat/src/test/com/android/ahat/ObjectHandlerTest.java index 1b8a781e0c..1b8a781e0c 100644 --- a/tools/ahat/test/ObjectHandlerTest.java +++ b/tools/ahat/src/test/com/android/ahat/ObjectHandlerTest.java diff --git a/tools/ahat/test/OverviewHandlerTest.java b/tools/ahat/src/test/com/android/ahat/OverviewHandlerTest.java index c2f773b64b..c2f773b64b 100644 --- a/tools/ahat/test/OverviewHandlerTest.java +++ b/tools/ahat/src/test/com/android/ahat/OverviewHandlerTest.java diff --git a/tools/ahat/test/PerformanceTest.java b/tools/ahat/src/test/com/android/ahat/PerformanceTest.java index e13974bb6f..e13974bb6f 100644 --- a/tools/ahat/test/PerformanceTest.java +++ b/tools/ahat/src/test/com/android/ahat/PerformanceTest.java diff --git a/tools/ahat/test/ProguardMapTest.java b/tools/ahat/src/test/com/android/ahat/ProguardMapTest.java index ad40f45665..ad40f45665 100644 --- a/tools/ahat/test/ProguardMapTest.java +++ b/tools/ahat/src/test/com/android/ahat/ProguardMapTest.java diff --git a/tools/ahat/test/QueryTest.java b/tools/ahat/src/test/com/android/ahat/QueryTest.java index 5bcf8eafc3..5bcf8eafc3 100644 --- a/tools/ahat/test/QueryTest.java +++ b/tools/ahat/src/test/com/android/ahat/QueryTest.java diff --git a/tools/ahat/test/RootedHandlerTest.java b/tools/ahat/src/test/com/android/ahat/RootedHandlerTest.java index f325b8e9a7..f325b8e9a7 100644 --- a/tools/ahat/test/RootedHandlerTest.java 
+++ b/tools/ahat/src/test/com/android/ahat/RootedHandlerTest.java diff --git a/tools/ahat/test/SiteHandlerTest.java b/tools/ahat/src/test/com/android/ahat/SiteHandlerTest.java index 37596be8bb..37596be8bb 100644 --- a/tools/ahat/test/SiteHandlerTest.java +++ b/tools/ahat/src/test/com/android/ahat/SiteHandlerTest.java diff --git a/tools/ahat/test/SiteTest.java b/tools/ahat/src/test/com/android/ahat/SiteTest.java index dc0fe08297..dc0fe08297 100644 --- a/tools/ahat/test/SiteTest.java +++ b/tools/ahat/src/test/com/android/ahat/SiteTest.java diff --git a/tools/ahat/test/TestDump.java b/tools/ahat/src/test/com/android/ahat/TestDump.java index a0d1021ef1..a0d1021ef1 100644 --- a/tools/ahat/test/TestDump.java +++ b/tools/ahat/src/test/com/android/ahat/TestDump.java diff --git a/tools/ahat/test/TestHandler.java b/tools/ahat/src/test/com/android/ahat/TestHandler.java index 859e39a688..859e39a688 100644 --- a/tools/ahat/test/TestHandler.java +++ b/tools/ahat/src/test/com/android/ahat/TestHandler.java diff --git a/tools/ahat/test/Tests.java b/tools/ahat/src/test/com/android/ahat/Tests.java index 0e7043291d..0e7043291d 100644 --- a/tools/ahat/test/Tests.java +++ b/tools/ahat/src/test/com/android/ahat/Tests.java diff --git a/tools/ahat/test-dump/README.txt b/tools/ahat/test-dump/README.txt deleted file mode 100644 index e7ea584b26..0000000000 --- a/tools/ahat/test-dump/README.txt +++ /dev/null @@ -1,7 +0,0 @@ - -Main.java - A program used to generate a heap dump used for tests. -L.hprof - A version of the test dump generated on Android L, - with one of the ROOT_DEBUGGER records manually changed to a - ROOT_FINALIZING record. -O.hprof - A version of the test dump generated on Android O. -RI.hprof - A version of the test dump generated on the reference implementation. @@ -220,6 +220,11 @@ function detect_boot_image_location() { echo "$image_location" } +# If android logging is not explicitly set, only print warnings and errors. 
+if [ -z "$ANDROID_LOG_TAGS" ]; then + ANDROID_LOG_TAGS='*:w' +fi + # Runs dalvikvm, returns its exit code. # (Oat directories are cleaned up in between runs) function run_art() { @@ -229,15 +234,16 @@ function run_art() { # First cleanup any left-over 'oat' files from the last time dalvikvm was run. cleanup_oat_directory_for_classpath "$@" # Run dalvikvm. - verbose_run ANDROID_DATA=$ANDROID_DATA \ - ANDROID_ROOT=$ANDROID_ROOT \ - LD_LIBRARY_PATH=$LD_LIBRARY_PATH \ - PATH=$ANDROID_ROOT/bin:$PATH \ - LD_USE_LOAD_BIAS=1 \ - $LAUNCH_WRAPPER $ART_BINARY_PATH $lib \ - -XXlib:$LIBART \ - -Xnorelocate \ - -Ximage:"$image_location" \ + verbose_run ANDROID_DATA="$ANDROID_DATA" \ + ANDROID_ROOT="$ANDROID_ROOT" \ + LD_LIBRARY_PATH="$LD_LIBRARY_PATH" \ + PATH="$ANDROID_ROOT/bin:$PATH" \ + LD_USE_LOAD_BIAS=1 \ + ANDROID_LOG_TAGS="$ANDROID_LOG_TAGS" \ + $LAUNCH_WRAPPER $ART_BINARY_PATH $lib \ + -XXlib:"$LIBART" \ + -Xnorelocate \ + -Ximage:"$image_location" \ "$@" ret=$? diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh index 4f99ac33a9..ab604b2ded 100755 --- a/tools/buildbot-build.sh +++ b/tools/buildbot-build.sh @@ -70,6 +70,7 @@ if [[ $mode == "host" ]]; then make_command="make $j_arg $showcommands build-art-host-tests $common_targets dx-tests" make_command+=" ${out_dir}/host/linux-x86/lib/libjavacoretests.so " make_command+=" ${out_dir}/host/linux-x86/lib64/libjavacoretests.so" + make_command+=" libwrapagentpropertiesd libwrapagentproperties" elif [[ $mode == "target" ]]; then make_command="make $j_arg $showcommands build-art-target-tests $common_targets" make_command+=" libjavacrypto libjavacoretests libnetd_client linker toybox toolbox sh" diff --git a/tools/libjdwp_art_failures.txt b/tools/libjdwp_art_failures.txt index 6b5daec5a7..8d67c45cce 100644 --- a/tools/libjdwp_art_failures.txt +++ b/tools/libjdwp_art_failures.txt @@ -48,7 +48,7 @@ name: "org.apache.harmony.jpda.tests.jdwp.Events.VMDeath002Test#testVMDeathRequest" }, { - description: "Test fails with 
INTERNAL error due to proxy frame!", + description: "Test fails with OPAQUE_FRAME error due to attempting a GetLocalReference on a proxy frame instead of GetLocalInstance!", result: EXEC_FAILED, bug: 66903662, name: "org.apache.harmony.jpda.tests.jdwp.StackFrame.ProxyThisObjectTest#testThisObject" @@ -71,6 +71,33 @@ "org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testMethodExit", "org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testMethodExitWithReturnValue" ] }, +/* TODO Investigate these failures more closely */ +{ + description: "Tests that fail when run on the chromium buildbots against the prebuilt libjdwp.so in certain configurations", + result: EXEC_FAILED, + bug: 67497270, + names: [ + "org.apache.harmony.jpda.tests.jdwp.Events.CombinedEvents003Test#testCombinedEvents003_01", + "org.apache.harmony.jpda.tests.jdwp.Events.CombinedEventsTest#testCombinedEvents_01", + "org.apache.harmony.jpda.tests.jdwp.Events.CombinedEventsTest#testCombinedEvents_02", + "org.apache.harmony.jpda.tests.jdwp.Events.CombinedEventsTest#testCombinedEvents_03", + "org.apache.harmony.jpda.tests.jdwp.Events.CombinedEventsTest#testCombinedEvents_04", + "org.apache.harmony.jpda.tests.jdwp.Events.CombinedEventsTest#testCombinedEvents_06", + "org.apache.harmony.jpda.tests.jdwp.Events.VMDeathTest#testVMDeathEvent", + "org.apache.harmony.jpda.tests.jdwp.MultiSession.ClassPrepareTest#testClassPrepare001", + "org.apache.harmony.jpda.tests.jdwp.MultiSession.ExceptionTest#testException001", + "org.apache.harmony.jpda.tests.jdwp.MultiSession.FieldAccessTest#testFieldAccess001", + "org.apache.harmony.jpda.tests.jdwp.MultiSession.FieldModificationTest#testFieldModification001", + "org.apache.harmony.jpda.tests.jdwp.MultiSession.SingleStepTest#testSingleStep001", + "org.apache.harmony.jpda.tests.jdwp.MultiSession.VMDeathTest#testVMDeathRequest", + 
"org.apache.harmony.jpda.tests.jdwp.ReferenceType.SignatureWithGenericTest#testSignatureWithGeneric001", + "org.apache.harmony.jpda.tests.jdwp.StackFrame.GetValues002Test#testGetValues005_Int2", + "org.apache.harmony.jpda.tests.jdwp.VirtualMachine.SetDefaultStratumTest#testSetDefaultStratum001", + "org.apache.harmony.jpda.tests.jdwp.ThreadReference.StatusTest#testStatus001", + "org.apache.harmony.jpda.tests.jdwp.VirtualMachine.AllClassesTest#testAllClasses002", + "org.apache.harmony.jpda.tests.jdwp.VirtualMachine.AllClassesWithGenericTest#testAllClassesWithGeneric001" + ] +}, /* TODO Categorize these failures more. */ { description: "Tests that fail on both ART and RI. These tests are likely incorrect", |