Merge "Move to interpreter if async-exceptions are pending."
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index 521156a..87bf1c4 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -290,26 +290,42 @@
static const char* Name() { return "double"; }
};
+template <typename T>
+static inline CmdlineParseResult<T> ParseNumeric(const std::string& str) {
+ static_assert(sizeof(T) < sizeof(long long int), // NOLINT [runtime/int] [4]
+ "Current support is restricted.");
+
+ const char* begin = str.c_str();
+ char* end;
+
+ // Parse into a larger type (long long) because we can't use strtoul
+ // since it silently converts negative values into unsigned long and doesn't set errno.
+ errno = 0;
+ long long int result = strtoll(begin, &end, 10); // NOLINT [runtime/int] [4]
+ if (begin == end || *end != '\0' || errno == EINVAL) {
+ return CmdlineParseResult<T>::Failure("Failed to parse integer from " + str);
+ } else if ((errno == ERANGE) || // NOLINT [runtime/int] [4]
+ result < std::numeric_limits<T>::min() || result > std::numeric_limits<T>::max()) {
+ return CmdlineParseResult<T>::OutOfRange(
+ "Failed to parse integer from " + str + "; out of range");
+ }
+
+ return CmdlineParseResult<T>::Success(static_cast<T>(result));
+}
+
template <>
struct CmdlineType<unsigned int> : CmdlineTypeParser<unsigned int> {
Result Parse(const std::string& str) {
- const char* begin = str.c_str();
- char* end;
+ return ParseNumeric<unsigned int>(str);
+ }
- // Parse into a larger type (long long) because we can't use strtoul
- // since it silently converts negative values into unsigned long and doesn't set errno.
- errno = 0;
- long long int result = strtoll(begin, &end, 10); // NOLINT [runtime/int] [4]
- if (begin == end || *end != '\0' || errno == EINVAL) {
- return Result::Failure("Failed to parse integer from " + str);
- } else if ((errno == ERANGE) || // NOLINT [runtime/int] [4]
- result < std::numeric_limits<int>::min()
- || result > std::numeric_limits<unsigned int>::max() || result < 0) {
- return Result::OutOfRange(
- "Failed to parse integer from " + str + "; out of unsigned int range");
- }
+ static const char* Name() { return "unsigned integer"; }
+};
- return Result::Success(static_cast<unsigned int>(result));
+template <>
+struct CmdlineType<int> : CmdlineTypeParser<int> {
+ Result Parse(const std::string& str) {
+ return ParseNumeric<int>(str);
}
- static const char* Name() { return "unsigned integer"; }
+ static const char* Name() { return "integer"; }
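
For illustration (not part of the patch), a quick sketch of why ParseNumeric routes through strtoll: strtoul accepts a leading minus sign and silently wraps the value, so "-1" would come back as ULONG_MAX with errno still 0. Parsing into long long and range-checking against the target type rejects such inputs instead. A hedged example, assuming the IsSuccess() accessor the cmdline parser uses elsewhere:

  CmdlineParseResult<unsigned int> r1 = ParseNumeric<unsigned int>("-1");
  // !r1.IsSuccess(): -1 is below std::numeric_limits<unsigned int>::min() (0),
  // so the OutOfRange branch fires instead of wrapping to 4294967295.
  CmdlineParseResult<int> r2 = ParseNumeric<int>("123");
  // r2.IsSuccess() and holds 123; "123junk" or "" would fail the
  // begin/end/'\0' checks and return Failure.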
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 59ca4c7..1e4cdf2 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -193,6 +193,10 @@
"liblzma",
],
include_dirs: ["art/disassembler"],
+ header_libs: [
+ "art_cmdlineparser_headers", // For compiler_options.
+ ],
+
export_include_dirs: ["."],
}
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 538845d..b6cedff 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -18,7 +18,13 @@
#include <fstream>
+#include "android-base/stringprintf.h"
+
+#include "base/variant_map.h"
+#include "cmdline_parser.h"
+#include "compiler_options_map-inl.h"
#include "runtime.h"
+#include "simple_compiler_options_map.h"
namespace art {
@@ -71,115 +77,50 @@
(kIsTargetBuild || IsCoreImage() || Runtime::Current()->UseJitCompilation());
}
-void CompilerOptions::ParseHugeMethodMax(const StringPiece& option, UsageFn Usage) {
- ParseUintOption(option, "--huge-method-max", &huge_method_threshold_, Usage);
-}
-
-void CompilerOptions::ParseLargeMethodMax(const StringPiece& option, UsageFn Usage) {
- ParseUintOption(option, "--large-method-max", &large_method_threshold_, Usage);
-}
-
-void CompilerOptions::ParseSmallMethodMax(const StringPiece& option, UsageFn Usage) {
- ParseUintOption(option, "--small-method-max", &small_method_threshold_, Usage);
-}
-
-void CompilerOptions::ParseTinyMethodMax(const StringPiece& option, UsageFn Usage) {
- ParseUintOption(option, "--tiny-method-max", &tiny_method_threshold_, Usage);
-}
-
-void CompilerOptions::ParseNumDexMethods(const StringPiece& option, UsageFn Usage) {
- ParseUintOption(option, "--num-dex-methods", &num_dex_methods_threshold_, Usage);
-}
-
-void CompilerOptions::ParseInlineMaxCodeUnits(const StringPiece& option, UsageFn Usage) {
- ParseUintOption(option, "--inline-max-code-units", &inline_max_code_units_, Usage);
-}
-
-void CompilerOptions::ParseDumpInitFailures(const StringPiece& option,
- UsageFn Usage ATTRIBUTE_UNUSED) {
- DCHECK(option.starts_with("--dump-init-failures="));
- std::string file_name = option.substr(strlen("--dump-init-failures=")).data();
- init_failure_output_.reset(new std::ofstream(file_name));
+bool CompilerOptions::ParseDumpInitFailures(const std::string& option, std::string* error_msg) {
+ init_failure_output_.reset(new std::ofstream(option));
if (init_failure_output_.get() == nullptr) {
- LOG(ERROR) << "Failed to allocate ofstream";
+ *error_msg = "Failed to construct std::ofstream";
+ return false;
} else if (init_failure_output_->fail()) {
- LOG(ERROR) << "Failed to open " << file_name << " for writing the initialization "
- << "failures.";
+ *error_msg = android::base::StringPrintf(
+ "Failed to open %s for writing the initialization failures.", option.c_str());
init_failure_output_.reset();
- }
-}
-
-void CompilerOptions::ParseRegisterAllocationStrategy(const StringPiece& option,
- UsageFn Usage) {
- DCHECK(option.starts_with("--register-allocation-strategy="));
- StringPiece choice = option.substr(strlen("--register-allocation-strategy=")).data();
- if (choice == "linear-scan") {
- register_allocation_strategy_ = RegisterAllocator::Strategy::kRegisterAllocatorLinearScan;
- } else if (choice == "graph-color") {
- register_allocation_strategy_ = RegisterAllocator::Strategy::kRegisterAllocatorGraphColor;
- } else {
- Usage("Unrecognized register allocation strategy. Try linear-scan, or graph-color.");
- }
-}
-
-bool CompilerOptions::ParseCompilerOption(const StringPiece& option, UsageFn Usage) {
- if (option.starts_with("--compiler-filter=")) {
- const char* compiler_filter_string = option.substr(strlen("--compiler-filter=")).data();
- if (!CompilerFilter::ParseCompilerFilter(compiler_filter_string, &compiler_filter_)) {
- Usage("Unknown --compiler-filter value %s", compiler_filter_string);
- }
- } else if (option == "--compile-pic") {
- compile_pic_ = true;
- } else if (option.starts_with("--huge-method-max=")) {
- ParseHugeMethodMax(option, Usage);
- } else if (option.starts_with("--large-method-max=")) {
- ParseLargeMethodMax(option, Usage);
- } else if (option.starts_with("--small-method-max=")) {
- ParseSmallMethodMax(option, Usage);
- } else if (option.starts_with("--tiny-method-max=")) {
- ParseTinyMethodMax(option, Usage);
- } else if (option.starts_with("--num-dex-methods=")) {
- ParseNumDexMethods(option, Usage);
- } else if (option.starts_with("--inline-max-code-units=")) {
- ParseInlineMaxCodeUnits(option, Usage);
- } else if (option == "--generate-debug-info" || option == "-g") {
- generate_debug_info_ = true;
- } else if (option == "--no-generate-debug-info") {
- generate_debug_info_ = false;
- } else if (option == "--generate-mini-debug-info") {
- generate_mini_debug_info_ = true;
- } else if (option == "--no-generate-mini-debug-info") {
- generate_mini_debug_info_ = false;
- } else if (option == "--generate-build-id") {
- generate_build_id_ = true;
- } else if (option == "--no-generate-build-id") {
- generate_build_id_ = false;
- } else if (option == "--debuggable") {
- debuggable_ = true;
- } else if (option.starts_with("--top-k-profile-threshold=")) {
- ParseDouble(option.data(), '=', 0.0, 100.0, &top_k_profile_threshold_, Usage);
- } else if (option == "--abort-on-hard-verifier-error") {
- abort_on_hard_verifier_failure_ = true;
- } else if (option == "--no-abort-on-hard-verifier-error") {
- abort_on_hard_verifier_failure_ = false;
- } else if (option.starts_with("--dump-init-failures=")) {
- ParseDumpInitFailures(option, Usage);
- } else if (option.starts_with("--dump-cfg=")) {
- dump_cfg_file_name_ = option.substr(strlen("--dump-cfg=")).data();
- } else if (option == "--dump-cfg-append") {
- dump_cfg_append_ = true;
- } else if (option.starts_with("--register-allocation-strategy=")) {
- ParseRegisterAllocationStrategy(option, Usage);
- } else if (option.starts_with("--verbose-methods=")) {
- // TODO: rather than switch off compiler logging, make all VLOG(compiler) messages
- // conditional on having verbose methods.
- gLogVerbosity.compiler = false;
- Split(option.substr(strlen("--verbose-methods=")).ToString(), ',', &verbose_methods_);
- } else {
- // Option not recognized.
return false;
}
return true;
}
+bool CompilerOptions::ParseRegisterAllocationStrategy(const std::string& option,
+ std::string* error_msg) {
+ if (option == "linear-scan") {
+ register_allocation_strategy_ = RegisterAllocator::Strategy::kRegisterAllocatorLinearScan;
+ } else if (option == "graph-color") {
+ register_allocation_strategy_ = RegisterAllocator::Strategy::kRegisterAllocatorGraphColor;
+ } else {
+ *error_msg = "Unrecognized register allocation strategy. Try linear-scan, or graph-color.";
+ return false;
+ }
+ return true;
+}
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wframe-larger-than="
+
+bool CompilerOptions::ParseCompilerOptions(const std::vector<std::string>& options,
+ bool ignore_unrecognized,
+ std::string* error_msg) {
+ auto parser = CreateSimpleParser(ignore_unrecognized);
+ CmdlineResult parse_result = parser.Parse(options);
+ if (!parse_result.IsSuccess()) {
+ *error_msg = parse_result.GetMessage();
+ return false;
+ }
+
+ SimpleParseArgumentMap args = parser.ReleaseArgumentsMap();
+ return ReadCompilerOptions(args, this, error_msg);
+}
+
+#pragma GCC diagnostic pop
+
} // namespace art
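
For illustration (not part of the patch): the new entry point replaces the abort-on-error UsageFn callback with a bool return plus an out-parameter, so each caller picks its own failure policy (the jit_compiler.cc hunk below logs FATAL; dex2oat can print usage). A minimal caller sketch with made-up flag values:

  CompilerOptions options;
  std::string error_msg;
  std::vector<std::string> args = {"--huge-method-max=10000", "--debuggable"};
  if (!options.ParseCompilerOptions(args, /* ignore_unrecognized */ false, &error_msg)) {
    LOG(ERROR) << error_msg;  // Caller-chosen policy; could equally be Usage() or LOG(FATAL).
  }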
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index a9372c4..311dbd5 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -231,7 +231,9 @@
return no_inline_from_;
}
- bool ParseCompilerOption(const StringPiece& option, UsageFn Usage);
+ bool ParseCompilerOptions(const std::vector<std::string>& options,
+ bool ignore_unrecognized,
+ std::string* error_msg);
void SetNonPic() {
compile_pic_ = false;
@@ -258,7 +260,7 @@
}
private:
- void ParseDumpInitFailures(const StringPiece& option, UsageFn Usage);
+ bool ParseDumpInitFailures(const std::string& option, std::string* error_msg);
void ParseDumpCfgPasses(const StringPiece& option, UsageFn Usage);
void ParseInlineMaxCodeUnits(const StringPiece& option, UsageFn Usage);
void ParseNumDexMethods(const StringPiece& option, UsageFn Usage);
@@ -266,7 +268,7 @@
void ParseSmallMethodMax(const StringPiece& option, UsageFn Usage);
void ParseLargeMethodMax(const StringPiece& option, UsageFn Usage);
void ParseHugeMethodMax(const StringPiece& option, UsageFn Usage);
- void ParseRegisterAllocationStrategy(const StringPiece& option, UsageFn Usage);
+ bool ParseRegisterAllocationStrategy(const std::string& option, std::string* error_msg);
CompilerFilter::Filter compiler_filter_;
size_t huge_method_threshold_;
@@ -327,6 +329,9 @@
friend class CommonCompilerTest;
friend class verifier::VerifierDepsTest;
+ template <class Base>
+ friend bool ReadCompilerOptions(Base& map, CompilerOptions* options, std::string* error_msg);
+
DISALLOW_COPY_AND_ASSIGN(CompilerOptions);
};
diff --git a/compiler/driver/compiler_options_map-inl.h b/compiler/driver/compiler_options_map-inl.h
new file mode 100644
index 0000000..9cb818a
--- /dev/null
+++ b/compiler/driver/compiler_options_map-inl.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_INL_H_
+#define ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_INL_H_
+
+#include "compiler_options_map.h"
+
+#include <memory>
+
+#include "android-base/logging.h"
+#include "android-base/macros.h"
+#include "android-base/stringprintf.h"
+
+#include "base/macros.h"
+#include "cmdline_parser.h"
+#include "compiler_options.h"
+
+namespace art {
+
+template <class Base>
+inline bool ReadCompilerOptions(Base& map, CompilerOptions* options, std::string* error_msg) {
+ if (map.Exists(Base::CompilerFilter)) {
+ CompilerFilter::Filter compiler_filter;
+ if (!CompilerFilter::ParseCompilerFilter(map.Get(Base::CompilerFilter)->c_str(),
+ &compiler_filter)) {
+ *error_msg = android::base::StringPrintf("Unknown --compiler-filter value %s",
+ map.Get(Base::CompilerFilter)->c_str());
+ return false;
+ }
+ options->SetCompilerFilter(compiler_filter);
+ }
+ if (map.Exists(Base::PIC)) {
+ options->compile_pic_ = true;
+ }
+ map.AssignIfExists(Base::HugeMethodMaxThreshold, &options->huge_method_threshold_);
+ map.AssignIfExists(Base::LargeMethodMaxThreshold, &options->large_method_threshold_);
+ map.AssignIfExists(Base::SmallMethodMaxThreshold, &options->small_method_threshold_);
+ map.AssignIfExists(Base::TinyMethodMaxThreshold, &options->tiny_method_threshold_);
+ map.AssignIfExists(Base::NumDexMethodsThreshold, &options->num_dex_methods_threshold_);
+ map.AssignIfExists(Base::InlineMaxCodeUnitsThreshold, &options->inline_max_code_units_);
+ map.AssignIfExists(Base::GenerateDebugInfo, &options->generate_debug_info_);
+ map.AssignIfExists(Base::GenerateMiniDebugInfo, &options->generate_mini_debug_info_);
+ map.AssignIfExists(Base::GenerateBuildID, &options->generate_build_id_);
+ if (map.Exists(Base::Debuggable)) {
+ options->debuggable_ = true;
+ }
+ map.AssignIfExists(Base::TopKProfileThreshold, &options->top_k_profile_threshold_);
+ map.AssignIfExists(Base::AbortOnHardVerifierFailure, &options->abort_on_hard_verifier_failure_);
+ if (map.Exists(Base::DumpInitFailures)) {
+ if (!options->ParseDumpInitFailures(*map.Get(Base::DumpInitFailures), error_msg)) {
+ return false;
+ }
+ }
+ map.AssignIfExists(Base::DumpCFG, &options->dump_cfg_file_name_);
+ if (map.Exists(Base::DumpCFGAppend)) {
+ options->dump_cfg_append_ = true;
+ }
+ if (map.Exists(Base::RegisterAllocationStrategy)) {
+ if (!options->ParseRegisterAllocationStrategy(*map.Get(Base::RegisterAllocationStrategy), error_msg)) {
+ return false;
+ }
+ }
+ map.AssignIfExists(Base::VerboseMethods, &options->verbose_methods_);
+
+ return true;
+}
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wframe-larger-than="
+
+template <typename Map, typename Builder>
+inline void AddCompilerOptionsArgumentParserOptions(Builder& b) {
+ b.
+ Define("--compiler-filter=_")
+ .template WithType<std::string>()
+ .IntoKey(Map::CompilerFilter)
+
+ .Define("--compile-pic")
+ .IntoKey(Map::PIC)
+
+ .Define("--huge-method-max=_")
+ .template WithType<unsigned int>()
+ .IntoKey(Map::HugeMethodMaxThreshold)
+ .Define("--large-method-max=_")
+ .template WithType<unsigned int>()
+ .IntoKey(Map::LargeMethodMaxThreshold)
+ .Define("--small-method-max=_")
+ .template WithType<unsigned int>()
+ .IntoKey(Map::SmallMethodMaxThreshold)
+ .Define("--tiny-method-max=_")
+ .template WithType<unsigned int>()
+ .IntoKey(Map::TinyMethodMaxThreshold)
+ .Define("--num-dex-methods=_")
+ .template WithType<unsigned int>()
+ .IntoKey(Map::NumDexMethodsThreshold)
+ .Define("--inline-max-code-units=_")
+ .template WithType<unsigned int>()
+ .IntoKey(Map::InlineMaxCodeUnitsThreshold)
+
+ .Define({"--generate-debug-info", "-g", "--no-generate-debug-info"})
+ .WithValues({true, true, false})
+ .IntoKey(Map::GenerateDebugInfo)
+ .Define({"--generate-mini-debug-info", "--no-generate-mini-debug-info"})
+ .WithValues({true, false})
+ .IntoKey(Map::GenerateMiniDebugInfo)
+
+ .Define({"--generate-build-id", "--no-generate-build-id"})
+ .WithValues({true, false})
+ .IntoKey(Map::GenerateBuildID)
+
+ .Define("--debuggable")
+ .IntoKey(Map::Debuggable)
+
+ .Define("--top-k-profile-threshold=_")
+ .template WithType<double>().WithRange(0.0, 100.0)
+ .IntoKey(Map::TopKProfileThreshold)
+
+ .Define({"--abort-on-hard-verifier-error", "--no-abort-on-hard-verifier-error"})
+ .WithValues({true, false})
+ .IntoKey(Map::AbortOnHardVerifierFailure)
+
+ .Define("--dump-init-failures=_")
+ .template WithType<std::string>()
+ .IntoKey(Map::DumpInitFailures)
+
+ .Define("--dump-cfg=_")
+ .template WithType<std::string>()
+ .IntoKey(Map::DumpCFG)
+ .Define("--dump-cfg-append")
+ .IntoKey(Map::DumpCFGAppend)
+
+ .Define("--register-allocation-strategy=_")
+ .template WithType<std::string>()
+ .IntoKey(Map::RegisterAllocationStrategy)
+
+ .Define("--verbose-methods=_")
+ .template WithType<ParseStringList<','>>()
+ .IntoKey(Map::VerboseMethods);
+}
+
+#pragma GCC diagnostic pop
+
+} // namespace art
+
+#endif // ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_INL_H_
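
For illustration (not part of the patch): in the builder DSL above, the trailing "_" in a Define() pattern is the wildcard the typed value is matched against. A sketch of the two flavors:

  // --huge-method-max=10000
  //   matches "--huge-method-max=_", so "10000" is handed to
  //   CmdlineType<unsigned int>::Parse (i.e. ParseNumeric<unsigned int>)
  //   and stored under Map::HugeMethodMaxThreshold.
  // --compile-pic
  //   has no "_": it is a flag-only definition whose presence is recorded
  //   under Map::PIC with the marker type Unit.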
diff --git a/compiler/driver/compiler_options_map-storage.h b/compiler/driver/compiler_options_map-storage.h
new file mode 100644
index 0000000..756598d
--- /dev/null
+++ b/compiler/driver/compiler_options_map-storage.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_STORAGE_H_
+#define ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_STORAGE_H_
+
+// Assumes:
+// * #include "compiler_options_map.h"
+// * namespace art
+//
+// Usage:
+// #define COMPILER_OPTIONS_MAP_TYPE TheTypeOfTheMap
+// #define COMPILER_OPTIONS_MAP_KEY_TYPE TheTypeOfTheMapsKey
+// #include "driver/compiler_options_map-storage.h
+
+#ifndef COMPILER_OPTIONS_MAP_TYPE
+#error "Expected COMPILER_OPTIONS_MAP_TYPE"
+#endif
+
+#ifndef COMPILER_OPTIONS_MAP_KEY_TYPE
+#error "Expected COMPILER_OPTIONS_MAP_KEY_TYPE"
+#endif
+
+#define COMPILER_OPTIONS_KEY(Type, Name, ...) \
+ template <typename Base, template <typename TV> class KeyType> \
+ const KeyType<Type> CompilerOptionsMap<Base, KeyType>::Name {__VA_ARGS__}; // NOLINT [readability/braces] [4]
+#include <driver/compiler_options_map.def>
+
+template struct CompilerOptionsMap<COMPILER_OPTIONS_MAP_TYPE, COMPILER_OPTIONS_MAP_KEY_TYPE>;
+
+#undef COMPILER_OPTIONS_MAP_TYPE
+#undef COMPILER_OPTIONS_MAP_KEY_TYPE
+
+#endif // ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_STORAGE_H_
+#undef ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_STORAGE_H_ // Guard is only for cpplint
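
For illustration (not part of the patch): with the two documented defines in place, each row of the .def file expands here to an out-of-line definition of the corresponding static key. For COMPILER_OPTIONS_KEY(unsigned int, HugeMethodMaxThreshold) the macro emits roughly:

  template <typename Base, template <typename TV> class KeyType>
  const KeyType<unsigned int>
      CompilerOptionsMap<Base, KeyType>::HugeMethodMaxThreshold {};

and the explicit instantiation at the end pins all such member definitions down for the concrete map/key pair named by the macros.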
diff --git a/compiler/driver/compiler_options_map.def b/compiler/driver/compiler_options_map.def
new file mode 100644
index 0000000..570bc5a
--- /dev/null
+++ b/compiler/driver/compiler_options_map.def
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License")
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef COMPILER_OPTIONS_KEY
+#error "Please #define COMPILER_OPTIONS_KEY before #including this file"
+#define COMPILER_OPTIONS_KEY(...) // Don't display errors in this file in IDEs.
+#endif
+
+// This file defines the list of keys for CompilerOptionsMap.
+// These can be used with CompilerOptionsMap.Get/Set/etc, once that template class has been
+// instantiated.
+//
+// Column Descriptions:
+// <<Type>> <<Key Name>> (<<Default Value>>)
+//
+// Default values are only used by Map::GetOrDefault(K<T>).
+// If a default value is omitted here, T{} is used as the default value, which
+// is almost always equivalent to zero-initializing the type (memset to all 0).
+//
+// Please keep the columns aligned if possible when adding new rows.
+//
+
+// Parse-able keys from the command line.
+
+// TODO: Add type parser.
+COMPILER_OPTIONS_KEY (std::string, CompilerFilter)
+COMPILER_OPTIONS_KEY (Unit, PIC)
+COMPILER_OPTIONS_KEY (unsigned int, HugeMethodMaxThreshold)
+COMPILER_OPTIONS_KEY (unsigned int, LargeMethodMaxThreshold)
+COMPILER_OPTIONS_KEY (unsigned int, SmallMethodMaxThreshold)
+COMPILER_OPTIONS_KEY (unsigned int, TinyMethodMaxThreshold)
+COMPILER_OPTIONS_KEY (unsigned int, NumDexMethodsThreshold)
+COMPILER_OPTIONS_KEY (unsigned int, InlineMaxCodeUnitsThreshold)
+COMPILER_OPTIONS_KEY (bool, GenerateDebugInfo)
+COMPILER_OPTIONS_KEY (bool, GenerateMiniDebugInfo)
+COMPILER_OPTIONS_KEY (bool, GenerateBuildID)
+COMPILER_OPTIONS_KEY (Unit, Debuggable)
+COMPILER_OPTIONS_KEY (double, TopKProfileThreshold)
+COMPILER_OPTIONS_KEY (bool, AbortOnHardVerifierFailure)
+COMPILER_OPTIONS_KEY (std::string, DumpInitFailures)
+COMPILER_OPTIONS_KEY (std::string, DumpCFG)
+COMPILER_OPTIONS_KEY (Unit, DumpCFGAppend)
+// TODO: Add type parser.
+COMPILER_OPTIONS_KEY (std::string, RegisterAllocationStrategy)
+COMPILER_OPTIONS_KEY (ParseStringList<','>, VerboseMethods)
+
+#undef COMPILER_OPTIONS_KEY
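
For illustration (not part of the patch): since none of the rows above declare a default, Map::GetOrDefault(K<T>) falls back to T{}. A hedged sketch, using the SimpleParseArgumentMap completion defined further below:

  // No --huge-method-max on the command line:
  unsigned int huge = args.GetOrDefault(SimpleParseArgumentMap::HugeMethodMaxThreshold);
  // huge == 0u, i.e. unsigned int{}.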
diff --git a/compiler/driver/compiler_options_map.h b/compiler/driver/compiler_options_map.h
new file mode 100644
index 0000000..b9bc8b6
--- /dev/null
+++ b/compiler/driver/compiler_options_map.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_H_
+#define ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_H_
+
+#include <string>
+#include <vector>
+
+#include "base/variant_map.h"
+#include "cmdline_types.h"
+
+namespace art {
+
+// Defines a type-safe heterogeneous key->value map. This is to be used as the base for
+// an extended map.
+template <typename Base, template <typename TV> class KeyType>
+struct CompilerOptionsMap : VariantMap<Base, KeyType> {
+ // Make the next many usages of Key slightly shorter to type.
+ template <typename TValue>
+ using Key = KeyType<TValue>;
+
+ // List of key declarations, shorthand for 'static const Key<T> Name'
+#define COMPILER_OPTIONS_KEY(Type, Name, ...) static const Key<Type> (Name);
+#include "compiler_options_map.def"
+};
+
+#undef DECLARE_KEY
+
+} // namespace art
+
+#endif // ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_H_
diff --git a/compiler/driver/simple_compiler_options_map.h b/compiler/driver/simple_compiler_options_map.h
new file mode 100644
index 0000000..3860da9
--- /dev/null
+++ b/compiler/driver/simple_compiler_options_map.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file declares a completion of the CompilerOptionsMap and should only
+// be included in a .cc file.
+
+#ifndef ART_COMPILER_DRIVER_SIMPLE_COMPILER_OPTIONS_MAP_H_
+#define ART_COMPILER_DRIVER_SIMPLE_COMPILER_OPTIONS_MAP_H_
+
+#include <memory>
+
+#include "compiler_options_map-inl.h"
+#include "base/variant_map.h"
+
+namespace art {
+
+template <typename TValue>
+struct SimpleParseArgumentMapKey : VariantMapKey<TValue> {
+ SimpleParseArgumentMapKey() {}
+ explicit SimpleParseArgumentMapKey(TValue default_value)
+ : VariantMapKey<TValue>(std::move(default_value)) {}
+ // Don't ODR-use constexpr default values, which means that Struct::Fields
+ // that are declared 'static constexpr T Name = Value' don't need to have a matching definition.
+};
+
+struct SimpleParseArgumentMap : CompilerOptionsMap<SimpleParseArgumentMap,
+ SimpleParseArgumentMapKey> {
+ // This 'using' line is necessary to inherit the variadic constructor.
+ using CompilerOptionsMap<SimpleParseArgumentMap, SimpleParseArgumentMapKey>::CompilerOptionsMap;
+};
+
+#define COMPILER_OPTIONS_MAP_TYPE SimpleParseArgumentMap
+#define COMPILER_OPTIONS_MAP_KEY_TYPE SimpleParseArgumentMapKey
+#include "compiler_options_map-storage.h"
+
+using Parser = CmdlineParser<SimpleParseArgumentMap, SimpleParseArgumentMapKey>;
+
+static inline Parser CreateSimpleParser(bool ignore_unrecognized) {
+ std::unique_ptr<Parser::Builder> parser_builder =
+ std::unique_ptr<Parser::Builder>(new Parser::Builder());
+
+ AddCompilerOptionsArgumentParserOptions<SimpleParseArgumentMap>(*parser_builder);
+
+ parser_builder->IgnoreUnrecognized(ignore_unrecognized);
+
+ return parser_builder->Build();
+}
+
+} // namespace art
+
+#endif // ART_COMPILER_DRIVER_SIMPLE_COMPILER_OPTIONS_MAP_H_
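
For illustration (not part of the patch): end-to-end use of the simple map, mirroring CompilerOptions::ParseCompilerOptions above (flag values are made up):

  std::vector<std::string> argv = {"--dump-cfg=/tmp/cfg.txt", "--compile-pic"};
  Parser parser = CreateSimpleParser(/* ignore_unrecognized */ false);
  CmdlineResult res = parser.Parse(argv);
  if (res.IsSuccess()) {
    SimpleParseArgumentMap args = parser.ReleaseArgumentsMap();
    if (args.Exists(SimpleParseArgumentMap::DumpCFG)) {
      const std::string& path = *args.Get(SimpleParseArgumentMap::DumpCFG);
      // ... open path; ReadCompilerOptions(args, &options, &error_msg)
      // would consume the remaining keys.
    }
  }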
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 511a44a..5c89869 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -78,21 +78,16 @@
}
}
-// Callers of this method assume it has NO_RETURN.
-NO_RETURN static void Usage(const char* fmt, ...) {
- va_list ap;
- va_start(ap, fmt);
- std::string error;
- android::base::StringAppendV(&error, fmt, ap);
- LOG(FATAL) << error;
- va_end(ap);
- exit(EXIT_FAILURE);
-}
-
JitCompiler::JitCompiler() {
compiler_options_.reset(new CompilerOptions());
- for (const std::string& argument : Runtime::Current()->GetCompilerOptions()) {
- compiler_options_->ParseCompilerOption(argument, Usage);
+ {
+ std::string error_msg;
+ if (!compiler_options_->ParseCompilerOptions(Runtime::Current()->GetCompilerOptions(),
+ true /* ignore_unrecognized */,
+ &error_msg)) {
+ LOG(FATAL) << error_msg;
+ UNREACHABLE();
+ }
}
// JIT is never PIC, no matter what the runtime compiler options specify.
compiler_options_->SetNonPic();
diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc
index 347f4ea..28709a1 100644
--- a/compiler/jni/jni_cfi_test.cc
+++ b/compiler/jni/jni_cfi_test.cc
@@ -84,7 +84,7 @@
callee_save_regs, mr_conv->EntrySpills());
jni_asm->IncreaseFrameSize(32);
jni_asm->DecreaseFrameSize(32);
- jni_asm->RemoveFrame(frame_size, callee_save_regs);
+ jni_asm->RemoveFrame(frame_size, callee_save_regs, /* may_suspend */ true);
jni_asm->FinalizeCode();
std::vector<uint8_t> actual_asm(jni_asm->CodeSize());
MemoryRegion code(&actual_asm[0], actual_asm.size());
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index 292ce10..3afd701 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -110,23 +110,31 @@
// Calling convention
ManagedRegister Arm64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
// X20 is safe to use as a scratch register:
- // - with Baker read barriers, it is reserved as Marking Register,
- // and thus does not actually need to be saved/restored; it is
- // refreshed on exit (see Arm64JNIMacroAssembler::RemoveFrame);
+ // - with Baker read barriers (in the case of a non-critical native
+ // method), it is reserved as Marking Register, and thus does not
+ // actually need to be saved/restored; it is refreshed on exit
+ // (see Arm64JNIMacroAssembler::RemoveFrame);
// - in other cases, it is saved on entry (in
// Arm64JNIMacroAssembler::BuildFrame) and restored on exit (in
- // Arm64JNIMacroAssembler::RemoveFrame).
+ // Arm64JNIMacroAssembler::RemoveFrame). This is also expected in
+ // the case of a critical native method in the Baker read barrier
+ // configuration, where the value of MR must be preserved across
+ // the JNI call (as there is no MR refresh in that case).
return Arm64ManagedRegister::FromXRegister(X20);
}
ManagedRegister Arm64JniCallingConvention::InterproceduralScratchRegister() {
// X20 is safe to use as a scratch register:
- // - with Baker read barriers, it is reserved as Marking Register,
- // and thus does not actually need to be saved/restored; it is
- // refreshed on exit (see Arm64JNIMacroAssembler::RemoveFrame);
+ // - with Baker read barriers (in the case of a non-critical native
+ // method), it is reserved as Marking Register, and thus does not
+ // actually need to be saved/restored; it is refreshed on exit
+ // (see Arm64JNIMacroAssembler::RemoveFrame);
// - in other cases, it is saved on entry (in
// Arm64JNIMacroAssembler::BuildFrame) and restored on exit (in
- // Arm64JNIMacroAssembler::RemoveFrame).
+ // Arm64JNIMacroAssembler::RemoveFrame). This is also expected in
+ // the case of a critical native method in the Baker read barrier
+ // configuration, where the value of MR must be preserved across
+ // the JNI call (as there is no MR refresh in that case).
return Arm64ManagedRegister::FromXRegister(X20);
}
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index 36a87a8..42a5f86 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -47,7 +47,7 @@
// Managed runtime calling convention
std::unique_ptr<ManagedRuntimeCallingConvention> ManagedRuntimeCallingConvention::Create(
- ArenaAllocator* arena,
+ ArenaAllocator* allocator,
bool is_static,
bool is_synchronized,
const char* shorty,
@@ -57,35 +57,37 @@
case kArm:
case kThumb2:
return std::unique_ptr<ManagedRuntimeCallingConvention>(
- new (arena) arm::ArmManagedRuntimeCallingConvention(is_static, is_synchronized, shorty));
+ new (allocator) arm::ArmManagedRuntimeCallingConvention(
+ is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64:
return std::unique_ptr<ManagedRuntimeCallingConvention>(
- new (arena) arm64::Arm64ManagedRuntimeCallingConvention(
+ new (allocator) arm64::Arm64ManagedRuntimeCallingConvention(
is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case kMips:
return std::unique_ptr<ManagedRuntimeCallingConvention>(
- new (arena) mips::MipsManagedRuntimeCallingConvention(
+ new (allocator) mips::MipsManagedRuntimeCallingConvention(
is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64:
return std::unique_ptr<ManagedRuntimeCallingConvention>(
- new (arena) mips64::Mips64ManagedRuntimeCallingConvention(
+ new (allocator) mips64::Mips64ManagedRuntimeCallingConvention(
is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case kX86:
return std::unique_ptr<ManagedRuntimeCallingConvention>(
- new (arena) x86::X86ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty));
+ new (allocator) x86::X86ManagedRuntimeCallingConvention(
+ is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64:
return std::unique_ptr<ManagedRuntimeCallingConvention>(
- new (arena) x86_64::X86_64ManagedRuntimeCallingConvention(
+ new (allocator) x86_64::X86_64ManagedRuntimeCallingConvention(
is_static, is_synchronized, shorty));
#endif
default:
@@ -146,7 +148,7 @@
// JNI calling convention
-std::unique_ptr<JniCallingConvention> JniCallingConvention::Create(ArenaAllocator* arena,
+std::unique_ptr<JniCallingConvention> JniCallingConvention::Create(ArenaAllocator* allocator,
bool is_static,
bool is_synchronized,
bool is_critical_native,
@@ -157,50 +159,38 @@
case kArm:
case kThumb2:
return std::unique_ptr<JniCallingConvention>(
- new (arena) arm::ArmJniCallingConvention(is_static,
- is_synchronized,
- is_critical_native,
- shorty));
+ new (allocator) arm::ArmJniCallingConvention(
+ is_static, is_synchronized, is_critical_native, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64:
return std::unique_ptr<JniCallingConvention>(
- new (arena) arm64::Arm64JniCallingConvention(is_static,
- is_synchronized,
- is_critical_native,
- shorty));
+ new (allocator) arm64::Arm64JniCallingConvention(
+ is_static, is_synchronized, is_critical_native, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case kMips:
return std::unique_ptr<JniCallingConvention>(
- new (arena) mips::MipsJniCallingConvention(is_static,
- is_synchronized,
- is_critical_native,
- shorty));
+ new (allocator) mips::MipsJniCallingConvention(
+ is_static, is_synchronized, is_critical_native, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64:
return std::unique_ptr<JniCallingConvention>(
- new (arena) mips64::Mips64JniCallingConvention(is_static,
- is_synchronized,
- is_critical_native,
- shorty));
+ new (allocator) mips64::Mips64JniCallingConvention(
+ is_static, is_synchronized, is_critical_native, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case kX86:
return std::unique_ptr<JniCallingConvention>(
- new (arena) x86::X86JniCallingConvention(is_static,
- is_synchronized,
- is_critical_native,
- shorty));
+ new (allocator) x86::X86JniCallingConvention(
+ is_static, is_synchronized, is_critical_native, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64:
return std::unique_ptr<JniCallingConvention>(
- new (arena) x86_64::X86_64JniCallingConvention(is_static,
- is_synchronized,
- is_critical_native,
- shorty));
+ new (allocator) x86_64::X86_64JniCallingConvention(
+ is_static, is_synchronized, is_critical_native, shorty));
#endif
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index 335a2df..be0bd72 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -231,7 +231,7 @@
// | { Method* } | <-- SP
class ManagedRuntimeCallingConvention : public CallingConvention {
public:
- static std::unique_ptr<ManagedRuntimeCallingConvention> Create(ArenaAllocator* arena,
+ static std::unique_ptr<ManagedRuntimeCallingConvention> Create(ArenaAllocator* allocator,
bool is_static,
bool is_synchronized,
const char* shorty,
@@ -284,7 +284,7 @@
// callee saves for frames above this one.
class JniCallingConvention : public CallingConvention {
public:
- static std::unique_ptr<JniCallingConvention> Create(ArenaAllocator* arena,
+ static std::unique_ptr<JniCallingConvention> Create(ArenaAllocator* allocator,
bool is_static,
bool is_synchronized,
bool is_critical_native,
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index c66a2a6..92b5c4d 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -66,8 +66,8 @@
template <PointerSize kPointerSize>
static std::unique_ptr<JNIMacroAssembler<kPointerSize>> GetMacroAssembler(
- ArenaAllocator* arena, InstructionSet isa, const InstructionSetFeatures* features) {
- return JNIMacroAssembler<kPointerSize>::Create(arena, isa, features);
+ ArenaAllocator* allocator, InstructionSet isa, const InstructionSetFeatures* features) {
+ return JNIMacroAssembler<kPointerSize>::Create(allocator, isa, features);
}
enum class JniEntrypoint {
@@ -646,7 +646,10 @@
// 16. Remove activation - need to restore callee save registers since the GC may have changed
// them.
DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size));
- __ RemoveFrame(frame_size, callee_save_regs);
+ // We expect the compiled method to possibly be suspended during its
+ // execution, except in the case of a CriticalNative method.
+ bool may_suspend = !is_critical_native;
+ __ RemoveFrame(frame_size, callee_save_regs, may_suspend);
DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size));
// 17. Finalize code generation
diff --git a/compiler/optimizing/block_builder.h b/compiler/optimizing/block_builder.h
index 6adce81..4a0f78c 100644
--- a/compiler/optimizing/block_builder.h
+++ b/compiler/optimizing/block_builder.h
@@ -29,7 +29,7 @@
HBasicBlockBuilder(HGraph* graph,
const DexFile* const dex_file,
const DexFile::CodeItem& code_item)
- : arena_(graph->GetArena()),
+ : arena_(graph->GetAllocator()),
graph_(graph),
dex_file_(dex_file),
code_item_(code_item),
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index a7f7bce..0255e73 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -513,18 +513,18 @@
maps_(graph->GetBlocks().size(),
ArenaSafeMap<int, ValueRange*>(
std::less<int>(),
- graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
- graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
+ graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
+ graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
first_index_bounds_check_map_(
std::less<int>(),
- graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
+ graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
early_exit_loop_(
std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
+ graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
taken_test_loop_(
std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
- finite_loop_(graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
+ graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
+ finite_loop_(graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
has_dom_based_dynamic_bce_(false),
initial_block_size_(graph->GetBlocks().size()),
side_effects_(side_effects),
@@ -668,8 +668,8 @@
if (successor != nullptr) {
bool overflow;
bool underflow;
- ValueRange* new_left_range = new (GetGraph()->GetArena()) ValueRange(
- GetGraph()->GetArena(),
+ ValueRange* new_left_range = new (GetGraph()->GetAllocator()) ValueRange(
+ GetGraph()->GetAllocator(),
left_range->GetBound(),
right_range->GetBound().Add(left_compensation, &overflow, &underflow));
if (!overflow && !underflow) {
@@ -677,8 +677,8 @@
new_left_range);
}
- ValueRange* new_right_range = new (GetGraph()->GetArena()) ValueRange(
- GetGraph()->GetArena(),
+ ValueRange* new_right_range = new (GetGraph()->GetAllocator()) ValueRange(
+ GetGraph()->GetAllocator(),
left_range->GetBound().Add(right_compensation, &overflow, &underflow),
right_range->GetBound());
if (!overflow && !underflow) {
@@ -750,8 +750,8 @@
if (overflow || underflow) {
return;
}
- ValueRange* new_range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), ValueBound::Min(), new_upper);
+ ValueRange* new_range = new (GetGraph()->GetAllocator())
+ ValueRange(GetGraph()->GetAllocator(), ValueBound::Min(), new_upper);
ApplyRangeFromComparison(left, block, true_successor, new_range);
}
@@ -762,8 +762,8 @@
if (overflow || underflow) {
return;
}
- ValueRange* new_range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), new_lower, ValueBound::Max());
+ ValueRange* new_range = new (GetGraph()->GetAllocator())
+ ValueRange(GetGraph()->GetAllocator(), new_lower, ValueBound::Max());
ApplyRangeFromComparison(left, block, false_successor, new_range);
}
} else if (cond == kCondGT || cond == kCondGE) {
@@ -774,8 +774,8 @@
if (overflow || underflow) {
return;
}
- ValueRange* new_range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), new_lower, ValueBound::Max());
+ ValueRange* new_range = new (GetGraph()->GetAllocator())
+ ValueRange(GetGraph()->GetAllocator(), new_lower, ValueBound::Max());
ApplyRangeFromComparison(left, block, true_successor, new_range);
}
@@ -785,8 +785,8 @@
if (overflow || underflow) {
return;
}
- ValueRange* new_range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), ValueBound::Min(), new_upper);
+ ValueRange* new_range = new (GetGraph()->GetAllocator())
+ ValueRange(GetGraph()->GetAllocator(), ValueBound::Min(), new_upper);
ApplyRangeFromComparison(left, block, false_successor, new_range);
}
} else if (cond == kCondNE || cond == kCondEQ) {
@@ -795,8 +795,8 @@
// length == [c,d] yields [c, d] along true
// length != [c,d] yields [c, d] along false
if (!lower.Equals(ValueBound::Min()) || !upper.Equals(ValueBound::Max())) {
- ValueRange* new_range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), lower, upper);
+ ValueRange* new_range = new (GetGraph()->GetAllocator())
+ ValueRange(GetGraph()->GetAllocator(), lower, upper);
ApplyRangeFromComparison(
left, block, cond == kCondEQ ? true_successor : false_successor, new_range);
}
@@ -804,8 +804,8 @@
// length == 0 yields [1, max] along false
// length != 0 yields [1, max] along true
if (lower.GetConstant() == 0 && upper.GetConstant() == 0) {
- ValueRange* new_range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), ValueBound(nullptr, 1), ValueBound::Max());
+ ValueRange* new_range = new (GetGraph()->GetAllocator())
+ ValueRange(GetGraph()->GetAllocator(), ValueBound(nullptr, 1), ValueBound::Max());
ApplyRangeFromComparison(
left, block, cond == kCondEQ ? false_successor : true_successor, new_range);
}
@@ -826,7 +826,7 @@
// Non-constant index.
ValueBound lower = ValueBound(nullptr, 0); // constant 0
ValueBound upper = ValueBound(array_length, -1); // array_length - 1
- ValueRange array_range(GetGraph()->GetArena(), lower, upper);
+ ValueRange array_range(GetGraph()->GetAllocator(), lower, upper);
// Try index range obtained by dominator-based analysis.
ValueRange* index_range = LookupValueRange(index, block);
if (index_range != nullptr && index_range->FitsIn(&array_range)) {
@@ -875,8 +875,8 @@
} else {
ValueBound lower = ValueBound(nullptr, constant + 1);
ValueBound upper = ValueBound::Max();
- ValueRange* range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), lower, upper);
+ ValueRange* range = new (GetGraph()->GetAllocator())
+ ValueRange(GetGraph()->GetAllocator(), lower, upper);
AssignRange(block, array_length, range);
}
}
@@ -938,8 +938,8 @@
ValueRange* range = nullptr;
if (increment == 0) {
// Add constant 0. It's really a fixed value.
- range = new (GetGraph()->GetArena()) ValueRange(
- GetGraph()->GetArena(),
+ range = new (GetGraph()->GetAllocator()) ValueRange(
+ GetGraph()->GetAllocator(),
ValueBound(initial_value, 0),
ValueBound(initial_value, 0));
} else {
@@ -959,8 +959,8 @@
bound = increment > 0 ? ValueBound::Min() : ValueBound::Max();
}
}
- range = new (GetGraph()->GetArena()) MonotonicValueRange(
- GetGraph()->GetArena(),
+ range = new (GetGraph()->GetAllocator()) MonotonicValueRange(
+ GetGraph()->GetAllocator(),
phi,
initial_value,
increment,
@@ -1039,8 +1039,8 @@
!ValueBound::WouldAddOverflowOrUnderflow(c0, -c1)) {
if ((c0 - c1) <= 0) {
// array.length + (c0 - c1) won't overflow/underflow.
- ValueRange* range = new (GetGraph()->GetArena()) ValueRange(
- GetGraph()->GetArena(),
+ ValueRange* range = new (GetGraph()->GetAllocator()) ValueRange(
+ GetGraph()->GetAllocator(),
ValueBound(nullptr, right_const - upper.GetConstant()),
ValueBound(array_length, right_const - lower.GetConstant()));
AssignRange(sub->GetBlock(), sub, range);
@@ -1087,8 +1087,8 @@
// than array_length.
return;
}
- ValueRange* range = new (GetGraph()->GetArena()) ValueRange(
- GetGraph()->GetArena(),
+ ValueRange* range = new (GetGraph()->GetAllocator()) ValueRange(
+ GetGraph()->GetAllocator(),
ValueBound(nullptr, std::numeric_limits<int32_t>::min()),
ValueBound(left, 0));
AssignRange(instruction->GetBlock(), instruction, range);
@@ -1113,8 +1113,8 @@
if (constant > 0) {
// constant serves as a mask so any number masked with it
// gets a [0, constant] value range.
- ValueRange* range = new (GetGraph()->GetArena()) ValueRange(
- GetGraph()->GetArena(),
+ ValueRange* range = new (GetGraph()->GetAllocator()) ValueRange(
+ GetGraph()->GetAllocator(),
ValueBound(nullptr, 0),
ValueBound(nullptr, constant));
AssignRange(instruction->GetBlock(), instruction, range);
@@ -1139,8 +1139,8 @@
// array[i % 10]; // index value range [0, 9]
// array[i % -10]; // index value range [0, 9]
// }
- ValueRange* right_range = new (GetGraph()->GetArena()) ValueRange(
- GetGraph()->GetArena(),
+ ValueRange* right_range = new (GetGraph()->GetAllocator()) ValueRange(
+ GetGraph()->GetAllocator(),
ValueBound(nullptr, 1 - right_const),
ValueBound(nullptr, right_const - 1));
@@ -1169,8 +1169,8 @@
if (right->IsArrayLength()) {
ValueBound lower = ValueBound::Min(); // ideally, lower should be '1-array_length'.
ValueBound upper = ValueBound(right, -1); // array_length - 1
- ValueRange* right_range = new (GetGraph()->GetArena()) ValueRange(
- GetGraph()->GetArena(),
+ ValueRange* right_range = new (GetGraph()->GetAllocator()) ValueRange(
+ GetGraph()->GetAllocator(),
lower,
upper);
ValueRange* left_range = LookupValueRange(left, instruction->GetBlock());
@@ -1195,8 +1195,8 @@
// which isn't available as an instruction yet. new_array will
// be treated the same as new_array.length when it's used in a ValueBound.
ValueBound upper = ValueBound(new_array, -right_const);
- ValueRange* range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), lower, upper);
+ ValueRange* range = new (GetGraph()->GetAllocator())
+ ValueRange(GetGraph()->GetAllocator(), lower, upper);
ValueRange* existing_range = LookupValueRange(left, new_array->GetBlock());
if (existing_range != nullptr) {
range = existing_range->Narrow(range);
@@ -1260,14 +1260,15 @@
if (base == nullptr) {
DCHECK_GE(min_c, 0);
} else {
- HInstruction* lower = new (GetGraph()->GetArena())
+ HInstruction* lower = new (GetGraph()->GetAllocator())
HAdd(DataType::Type::kInt32, base, GetGraph()->GetIntConstant(min_c));
- upper = new (GetGraph()->GetArena()) HAdd(DataType::Type::kInt32, base, upper);
+ upper = new (GetGraph()->GetAllocator()) HAdd(DataType::Type::kInt32, base, upper);
block->InsertInstructionBefore(lower, bounds_check);
block->InsertInstructionBefore(upper, bounds_check);
- InsertDeoptInBlock(bounds_check, new (GetGraph()->GetArena()) HAbove(lower, upper));
+ InsertDeoptInBlock(bounds_check, new (GetGraph()->GetAllocator()) HAbove(lower, upper));
}
- InsertDeoptInBlock(bounds_check, new (GetGraph()->GetArena()) HAboveOrEqual(upper, array_length));
+ InsertDeoptInBlock(
+ bounds_check, new (GetGraph()->GetAllocator()) HAboveOrEqual(upper, array_length));
// Flag that this kind of deoptimization has occurred.
has_dom_based_dynamic_bce_ = true;
}
@@ -1291,9 +1292,9 @@
int32_t min_c = base == nullptr ? 0 : value.GetConstant();
int32_t max_c = value.GetConstant();
ArenaVector<HBoundsCheck*> candidates(
- GetGraph()->GetArena()->Adapter(kArenaAllocBoundsCheckElimination));
+ GetGraph()->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination));
ArenaVector<HBoundsCheck*> standby(
- GetGraph()->GetArena()->Adapter(kArenaAllocBoundsCheckElimination));
+ GetGraph()->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination));
for (const HUseListNode<HInstruction*>& use : array_length->GetUses()) {
// Another bounds check in same or dominated block?
HInstruction* user = use.GetUser();
@@ -1377,7 +1378,7 @@
v2.is_known && (v2.a_constant == 0 || v2.a_constant == 1)) {
DCHECK(v1.a_constant == 1 || v1.instruction == nullptr);
DCHECK(v2.a_constant == 1 || v2.instruction == nullptr);
- ValueRange index_range(GetGraph()->GetArena(),
+ ValueRange index_range(GetGraph()->GetAllocator(),
ValueBound(v1.instruction, v1.b_constant),
ValueBound(v2.instruction, v2.b_constant));
// If analysis reveals a certain OOB, disable dynamic BCE. Otherwise,
@@ -1410,9 +1411,9 @@
int32_t min_c = base == nullptr ? 0 : value.GetConstant();
int32_t max_c = value.GetConstant();
ArenaVector<HBoundsCheck*> candidates(
- GetGraph()->GetArena()->Adapter(kArenaAllocBoundsCheckElimination));
+ GetGraph()->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination));
ArenaVector<HBoundsCheck*> standby(
- GetGraph()->GetArena()->Adapter(kArenaAllocBoundsCheckElimination));
+ GetGraph()->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination));
for (const HUseListNode<HInstruction*>& use : array_length->GetUses()) {
HInstruction* user = use.GetUser();
if (user->IsBoundsCheck() && loop == user->GetBlock()->GetLoopInformation()) {
@@ -1498,7 +1499,8 @@
if (min_c != max_c) {
DCHECK(min_lower == nullptr && min_upper != nullptr &&
max_lower == nullptr && max_upper != nullptr);
- InsertDeoptInLoop(loop, block, new (GetGraph()->GetArena()) HAbove(min_upper, max_upper));
+ InsertDeoptInLoop(
+ loop, block, new (GetGraph()->GetAllocator()) HAbove(min_upper, max_upper));
} else {
DCHECK(min_lower == nullptr && min_upper == nullptr &&
max_lower == nullptr && max_upper != nullptr);
@@ -1508,15 +1510,17 @@
if (min_c != max_c) {
DCHECK(min_lower != nullptr && min_upper != nullptr &&
max_lower != nullptr && max_upper != nullptr);
- InsertDeoptInLoop(loop, block, new (GetGraph()->GetArena()) HAbove(min_lower, max_lower));
+ InsertDeoptInLoop(
+ loop, block, new (GetGraph()->GetAllocator()) HAbove(min_lower, max_lower));
} else {
DCHECK(min_lower == nullptr && min_upper == nullptr &&
max_lower != nullptr && max_upper != nullptr);
}
- InsertDeoptInLoop(loop, block, new (GetGraph()->GetArena()) HAbove(max_lower, max_upper));
+ InsertDeoptInLoop(
+ loop, block, new (GetGraph()->GetAllocator()) HAbove(max_lower, max_upper));
}
InsertDeoptInLoop(
- loop, block, new (GetGraph()->GetArena()) HAboveOrEqual(max_upper, array_length));
+ loop, block, new (GetGraph()->GetAllocator()) HAboveOrEqual(max_upper, array_length));
} else {
// TODO: if rejected, avoid doing this again for subsequent instructions in this set?
}
@@ -1610,7 +1614,7 @@
TransformLoopForDeoptimizationIfNeeded(loop, needs_taken_test);
HBasicBlock* block = GetPreHeader(loop, check);
HInstruction* cond =
- new (GetGraph()->GetArena()) HEqual(array, GetGraph()->GetNullConstant());
+ new (GetGraph()->GetAllocator()) HEqual(array, GetGraph()->GetNullConstant());
InsertDeoptInLoop(loop, block, cond, /* is_null_check */ true);
ReplaceInstruction(check, array);
return true;
@@ -1685,8 +1689,8 @@
block->InsertInstructionBefore(condition, block->GetLastInstruction());
DeoptimizationKind kind =
is_null_check ? DeoptimizationKind::kLoopNullBCE : DeoptimizationKind::kLoopBoundsBCE;
- HDeoptimize* deoptimize = new (GetGraph()->GetArena()) HDeoptimize(
- GetGraph()->GetArena(), condition, kind, suspend->GetDexPc());
+ HDeoptimize* deoptimize = new (GetGraph()->GetAllocator()) HDeoptimize(
+ GetGraph()->GetAllocator(), condition, kind, suspend->GetDexPc());
block->InsertInstructionBefore(deoptimize, block->GetLastInstruction());
if (suspend->HasEnvironment()) {
deoptimize->CopyEnvironmentFromWithLoopPhiAdjustment(
@@ -1698,8 +1702,11 @@
void InsertDeoptInBlock(HBoundsCheck* bounds_check, HInstruction* condition) {
HBasicBlock* block = bounds_check->GetBlock();
block->InsertInstructionBefore(condition, bounds_check);
- HDeoptimize* deoptimize = new (GetGraph()->GetArena()) HDeoptimize(
- GetGraph()->GetArena(), condition, DeoptimizationKind::kBlockBCE, bounds_check->GetDexPc());
+ HDeoptimize* deoptimize = new (GetGraph()->GetAllocator()) HDeoptimize(
+ GetGraph()->GetAllocator(),
+ condition,
+ DeoptimizationKind::kBlockBCE,
+ bounds_check->GetDexPc());
block->InsertInstructionBefore(deoptimize, bounds_check);
deoptimize->CopyEnvironmentFrom(bounds_check->GetEnvironment());
}
@@ -1763,18 +1770,18 @@
HBasicBlock* false_block = if_block->GetSuccessors()[1]; // False successor.
// Goto instructions.
- true_block->AddInstruction(new (GetGraph()->GetArena()) HGoto());
- false_block->AddInstruction(new (GetGraph()->GetArena()) HGoto());
- new_preheader->AddInstruction(new (GetGraph()->GetArena()) HGoto());
+ true_block->AddInstruction(new (GetGraph()->GetAllocator()) HGoto());
+ false_block->AddInstruction(new (GetGraph()->GetAllocator()) HGoto());
+ new_preheader->AddInstruction(new (GetGraph()->GetAllocator()) HGoto());
// Insert the taken-test to see if the loop body is entered. If the
// loop isn't entered at all, it jumps around the deoptimization block.
- if_block->AddInstruction(new (GetGraph()->GetArena()) HGoto()); // placeholder
+ if_block->AddInstruction(new (GetGraph()->GetAllocator()) HGoto()); // placeholder
HInstruction* condition = induction_range_.GenerateTakenTest(
header->GetLastInstruction(), GetGraph(), if_block);
DCHECK(condition != nullptr);
if_block->RemoveInstruction(if_block->GetLastInstruction());
- if_block->AddInstruction(new (GetGraph()->GetArena()) HIf(condition));
+ if_block->AddInstruction(new (GetGraph()->GetAllocator()) HIf(condition));
taken_test_loop_.Put(loop_id, true_block);
}
@@ -1853,8 +1860,8 @@
case DataType::Type::kFloat64: zero = graph->GetDoubleConstant(0); break;
default: zero = graph->GetConstant(type, 0); break;
}
- HPhi* phi = new (graph->GetArena())
- HPhi(graph->GetArena(), kNoRegNumber, /*number_of_inputs*/ 2, HPhi::ToPhiType(type));
+ HPhi* phi = new (graph->GetAllocator())
+ HPhi(graph->GetAllocator(), kNoRegNumber, /*number_of_inputs*/ 2, HPhi::ToPhiType(type));
phi->SetRawInputAt(0, instruction);
phi->SetRawInputAt(1, zero);
if (type == DataType::Type::kReference) {
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index 851838c..1523478 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -32,10 +32,9 @@
/**
* Fixture class for the BoundsCheckElimination tests.
*/
-class BoundsCheckEliminationTest : public testing::Test {
+class BoundsCheckEliminationTest : public OptimizingUnitTest {
public:
- BoundsCheckEliminationTest() : pool_(), allocator_(&pool_) {
- graph_ = CreateGraph(&allocator_);
+ BoundsCheckEliminationTest() : graph_(CreateGraph()) {
graph_->SetHasBoundsChecks(true);
}
@@ -57,8 +56,6 @@
BoundsCheckElimination(graph_, side_effects, &induction).Run();
}
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
};
@@ -67,12 +64,12 @@
// else if (i >= array.length) { array[i] = 1; // Can't eliminate. }
// else { array[i] = 1; // Can eliminate. }
TEST_F(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator_) HParameterValue(
+ HInstruction* parameter1 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); // array
- HInstruction* parameter2 = new (&allocator_) HParameterValue(
+ HInstruction* parameter2 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); // i
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
@@ -80,70 +77,70 @@
HInstruction* constant_1 = graph_->GetIntConstant(1);
HInstruction* constant_0 = graph_->GetIntConstant(0);
- HBasicBlock* block1 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block1 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block1);
- HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(parameter2, constant_0);
- HIf* if_inst = new (&allocator_) HIf(cmp);
+ HInstruction* cmp = new (GetAllocator()) HGreaterThanOrEqual(parameter2, constant_0);
+ HIf* if_inst = new (GetAllocator()) HIf(cmp);
block1->AddInstruction(cmp);
block1->AddInstruction(if_inst);
entry->AddSuccessor(block1);
- HBasicBlock* block2 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block2 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block2);
- HNullCheck* null_check = new (&allocator_) HNullCheck(parameter1, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
- HBoundsCheck* bounds_check2 = new (&allocator_)
+ HNullCheck* null_check = new (GetAllocator()) HNullCheck(parameter1, 0);
+ HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HBoundsCheck* bounds_check2 = new (GetAllocator())
HBoundsCheck(parameter2, array_length, 0);
- HArraySet* array_set = new (&allocator_) HArraySet(
+ HArraySet* array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check2, constant_1, DataType::Type::kInt32, 0);
block2->AddInstruction(null_check);
block2->AddInstruction(array_length);
block2->AddInstruction(bounds_check2);
block2->AddInstruction(array_set);
- HBasicBlock* block3 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block3 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block3);
- null_check = new (&allocator_) HNullCheck(parameter1, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- cmp = new (&allocator_) HLessThan(parameter2, array_length);
- if_inst = new (&allocator_) HIf(cmp);
+ null_check = new (GetAllocator()) HNullCheck(parameter1, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ cmp = new (GetAllocator()) HLessThan(parameter2, array_length);
+ if_inst = new (GetAllocator()) HIf(cmp);
block3->AddInstruction(null_check);
block3->AddInstruction(array_length);
block3->AddInstruction(cmp);
block3->AddInstruction(if_inst);
- HBasicBlock* block4 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block4 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block4);
- null_check = new (&allocator_) HNullCheck(parameter1, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HBoundsCheck* bounds_check4 = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter1, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HBoundsCheck* bounds_check4 = new (GetAllocator())
HBoundsCheck(parameter2, array_length, 0);
- array_set = new (&allocator_) HArraySet(
+ array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check4, constant_1, DataType::Type::kInt32, 0);
block4->AddInstruction(null_check);
block4->AddInstruction(array_length);
block4->AddInstruction(bounds_check4);
block4->AddInstruction(array_set);
- HBasicBlock* block5 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block5 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block5);
- null_check = new (&allocator_) HNullCheck(parameter1, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HBoundsCheck* bounds_check5 = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter1, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HBoundsCheck* bounds_check5 = new (GetAllocator())
HBoundsCheck(parameter2, array_length, 0);
- array_set = new (&allocator_) HArraySet(
+ array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check5, constant_1, DataType::Type::kInt32, 0);
block5->AddInstruction(null_check);
block5->AddInstruction(array_length);
block5->AddInstruction(bounds_check5);
block5->AddInstruction(array_set);
- HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(exit);
block2->AddSuccessor(exit);
block4->AddSuccessor(exit);
block5->AddSuccessor(exit);
- exit->AddInstruction(new (&allocator_) HExit());
+ exit->AddInstruction(new (GetAllocator()) HExit());
block1->AddSuccessor(block3); // True successor
block1->AddSuccessor(block2); // False successor
@@ -164,12 +161,12 @@
// if (j < array.length) array[j] = 1; // Can't eliminate.
// }
TEST_F(BoundsCheckEliminationTest, OverflowArrayBoundsElimination) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator_) HParameterValue(
+ HInstruction* parameter1 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); // array
- HInstruction* parameter2 = new (&allocator_) HParameterValue(
+ HInstruction* parameter2 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); // i
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
@@ -178,39 +175,40 @@
HInstruction* constant_0 = graph_->GetIntConstant(0);
HInstruction* constant_max_int = graph_->GetIntConstant(INT_MAX);
- HBasicBlock* block1 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block1 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block1);
- HInstruction* cmp = new (&allocator_) HLessThanOrEqual(parameter2, constant_0);
- HIf* if_inst = new (&allocator_) HIf(cmp);
+ HInstruction* cmp = new (GetAllocator()) HLessThanOrEqual(parameter2, constant_0);
+ HIf* if_inst = new (GetAllocator()) HIf(cmp);
block1->AddInstruction(cmp);
block1->AddInstruction(if_inst);
entry->AddSuccessor(block1);
- HBasicBlock* block2 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block2 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block2);
- HInstruction* add = new (&allocator_) HAdd(DataType::Type::kInt32, parameter2, constant_max_int);
- HNullCheck* null_check = new (&allocator_) HNullCheck(parameter1, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
- HInstruction* cmp2 = new (&allocator_) HGreaterThanOrEqual(add, array_length);
- if_inst = new (&allocator_) HIf(cmp2);
+ HInstruction* add =
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, parameter2, constant_max_int);
+ HNullCheck* null_check = new (GetAllocator()) HNullCheck(parameter1, 0);
+ HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HInstruction* cmp2 = new (GetAllocator()) HGreaterThanOrEqual(add, array_length);
+ if_inst = new (GetAllocator()) HIf(cmp2);
block2->AddInstruction(add);
block2->AddInstruction(null_check);
block2->AddInstruction(array_length);
block2->AddInstruction(cmp2);
block2->AddInstruction(if_inst);
- HBasicBlock* block3 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block3 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block3);
- HBoundsCheck* bounds_check = new (&allocator_)
+ HBoundsCheck* bounds_check = new (GetAllocator())
HBoundsCheck(add, array_length, 0);
- HArraySet* array_set = new (&allocator_) HArraySet(
+ HArraySet* array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check, constant_1, DataType::Type::kInt32, 0);
block3->AddInstruction(bounds_check);
block3->AddInstruction(array_set);
- HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(exit);
- exit->AddInstruction(new (&allocator_) HExit());
+ exit->AddInstruction(new (GetAllocator()) HExit());
block1->AddSuccessor(exit); // true successor
block1->AddSuccessor(block2); // false successor
block2->AddSuccessor(exit); // true successor
@@ -228,12 +226,12 @@
// if (j > 0) array[j] = 1; // Can't eliminate.
// }
TEST_F(BoundsCheckEliminationTest, UnderflowArrayBoundsElimination) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator_) HParameterValue(
+ HInstruction* parameter1 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); // array
- HInstruction* parameter2 = new (&allocator_) HParameterValue(
+ HInstruction* parameter2 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); // i
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
@@ -242,41 +240,42 @@
HInstruction* constant_0 = graph_->GetIntConstant(0);
HInstruction* constant_max_int = graph_->GetIntConstant(INT_MAX);
- HBasicBlock* block1 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block1 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block1);
- HNullCheck* null_check = new (&allocator_) HNullCheck(parameter1, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
- HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(parameter2, array_length);
- HIf* if_inst = new (&allocator_) HIf(cmp);
+ HNullCheck* null_check = new (GetAllocator()) HNullCheck(parameter1, 0);
+ HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HInstruction* cmp = new (GetAllocator()) HGreaterThanOrEqual(parameter2, array_length);
+ HIf* if_inst = new (GetAllocator()) HIf(cmp);
block1->AddInstruction(null_check);
block1->AddInstruction(array_length);
block1->AddInstruction(cmp);
block1->AddInstruction(if_inst);
entry->AddSuccessor(block1);
- HBasicBlock* block2 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block2 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block2);
- HInstruction* sub1 = new (&allocator_) HSub(DataType::Type::kInt32, parameter2, constant_max_int);
- HInstruction* sub2 = new (&allocator_) HSub(DataType::Type::kInt32, sub1, constant_max_int);
- HInstruction* cmp2 = new (&allocator_) HLessThanOrEqual(sub2, constant_0);
- if_inst = new (&allocator_) HIf(cmp2);
+ HInstruction* sub1 =
+ new (GetAllocator()) HSub(DataType::Type::kInt32, parameter2, constant_max_int);
+ HInstruction* sub2 = new (GetAllocator()) HSub(DataType::Type::kInt32, sub1, constant_max_int);
+ HInstruction* cmp2 = new (GetAllocator()) HLessThanOrEqual(sub2, constant_0);
+ if_inst = new (GetAllocator()) HIf(cmp2);
block2->AddInstruction(sub1);
block2->AddInstruction(sub2);
block2->AddInstruction(cmp2);
block2->AddInstruction(if_inst);
- HBasicBlock* block3 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block3 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block3);
- HBoundsCheck* bounds_check = new (&allocator_)
+ HBoundsCheck* bounds_check = new (GetAllocator())
HBoundsCheck(sub2, array_length, 0);
- HArraySet* array_set = new (&allocator_) HArraySet(
+ HArraySet* array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check, constant_1, DataType::Type::kInt32, 0);
block3->AddInstruction(bounds_check);
block3->AddInstruction(array_set);
- HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(exit);
- exit->AddInstruction(new (&allocator_) HExit());
+ exit->AddInstruction(new (GetAllocator()) HExit());
block1->AddSuccessor(exit); // true successor
block1->AddSuccessor(block2); // false successor
block2->AddSuccessor(exit); // true successor
@@ -292,10 +291,10 @@
// array[5] = 1; // Can eliminate.
// array[4] = 1; // Can eliminate.
TEST_F(BoundsCheckEliminationTest, ConstantArrayBoundsElimination) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator_) HParameterValue(
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
entry->AddInstruction(parameter);
@@ -304,49 +303,49 @@
HInstruction* constant_6 = graph_->GetIntConstant(6);
HInstruction* constant_1 = graph_->GetIntConstant(1);
- HBasicBlock* block = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block);
entry->AddSuccessor(block);
- HNullCheck* null_check = new (&allocator_) HNullCheck(parameter, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
- HBoundsCheck* bounds_check6 = new (&allocator_)
+ HNullCheck* null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HBoundsCheck* bounds_check6 = new (GetAllocator())
HBoundsCheck(constant_6, array_length, 0);
- HInstruction* array_set = new (&allocator_) HArraySet(
+ HInstruction* array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check6, constant_1, DataType::Type::kInt32, 0);
block->AddInstruction(null_check);
block->AddInstruction(array_length);
block->AddInstruction(bounds_check6);
block->AddInstruction(array_set);
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HBoundsCheck* bounds_check5 = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HBoundsCheck* bounds_check5 = new (GetAllocator())
HBoundsCheck(constant_5, array_length, 0);
- array_set = new (&allocator_) HArraySet(
+ array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check5, constant_1, DataType::Type::kInt32, 0);
block->AddInstruction(null_check);
block->AddInstruction(array_length);
block->AddInstruction(bounds_check5);
block->AddInstruction(array_set);
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HBoundsCheck* bounds_check4 = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HBoundsCheck* bounds_check4 = new (GetAllocator())
HBoundsCheck(constant_4, array_length, 0);
- array_set = new (&allocator_) HArraySet(
+ array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check4, constant_1, DataType::Type::kInt32, 0);
block->AddInstruction(null_check);
block->AddInstruction(array_length);
block->AddInstruction(bounds_check4);
block->AddInstruction(array_set);
- block->AddInstruction(new (&allocator_) HGoto());
+ block->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(exit);
block->AddSuccessor(exit);
- exit->AddInstruction(new (&allocator_) HExit());
+ exit->AddInstruction(new (GetAllocator()) HExit());
RunBCE();
@@ -429,28 +428,28 @@
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1a) {
// for (int i=0; i<array.length; i++) { array[i] = 10; // Can eliminate with gvn. }
- HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 0, 1);
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, GetAllocator(), 0, 1);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1b) {
// for (int i=1; i<array.length; i++) { array[i] = 10; // Can eliminate. }
- HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 1, 1);
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, GetAllocator(), 1, 1);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1c) {
// for (int i=-1; i<array.length; i++) { array[i] = 10; // Can't eliminate. }
- HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, -1, 1);
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, GetAllocator(), -1, 1);
RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1d) {
// for (int i=0; i<=array.length; i++) { array[i] = 10; // Can't eliminate. }
- HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 0, 1, kCondGT);
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, GetAllocator(), 0, 1, kCondGT);
RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
}
@@ -458,14 +457,14 @@
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1e) {
// for (int i=0; i<array.length; i += 2) {
// array[i] = 10; // Can't eliminate due to overflow concern. }
- HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 0, 2);
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, GetAllocator(), 0, 2);
RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1f) {
// for (int i=1; i<array.length; i += 2) { array[i] = 10; // Can eliminate. }
- HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 1, 2);
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, GetAllocator(), 1, 2);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
@@ -546,35 +545,35 @@
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2a) {
// for (int i=array.length; i>0; i--) { array[i-1] = 10; // Can eliminate with gvn. }
- HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, 0);
+ HInstruction* bounds_check = BuildSSAGraph2(graph_, GetAllocator(), 0);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2b) {
// for (int i=array.length; i>1; i--) { array[i-1] = 10; // Can eliminate. }
- HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, 1);
+ HInstruction* bounds_check = BuildSSAGraph2(graph_, GetAllocator(), 1);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2c) {
// for (int i=array.length; i>-1; i--) { array[i-1] = 10; // Can't eliminate. }
- HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, -1);
+ HInstruction* bounds_check = BuildSSAGraph2(graph_, GetAllocator(), -1);
RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2d) {
// for (int i=array.length; i>=0; i--) { array[i-1] = 10; // Can't eliminate. }
- HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, 0, -1, kCondLT);
+ HInstruction* bounds_check = BuildSSAGraph2(graph_, GetAllocator(), 0, -1, kCondLT);
RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2e) {
// for (int i=array.length; i>0; i-=2) { array[i-1] = 10; // Can eliminate. }
- HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, 0, -2);
+ HInstruction* bounds_check = BuildSSAGraph2(graph_, GetAllocator(), 0, -2);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
@@ -653,7 +652,7 @@
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3a) {
// int[] array = new int[10];
// for (int i=0; i<10; i++) { array[i] = 10; // Can eliminate. }
- HInstruction* bounds_check = BuildSSAGraph3(graph_, &allocator_, 0, 1, kCondGE);
+ HInstruction* bounds_check = BuildSSAGraph3(graph_, GetAllocator(), 0, 1, kCondGE);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
@@ -661,7 +660,7 @@
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3b) {
// int[] array = new int[10];
// for (int i=1; i<10; i++) { array[i] = 10; // Can eliminate. }
- HInstruction* bounds_check = BuildSSAGraph3(graph_, &allocator_, 1, 1, kCondGE);
+ HInstruction* bounds_check = BuildSSAGraph3(graph_, GetAllocator(), 1, 1, kCondGE);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
@@ -669,7 +668,7 @@
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3c) {
// int[] array = new int[10];
// for (int i=0; i<=10; i++) { array[i] = 10; // Can't eliminate. }
- HInstruction* bounds_check = BuildSSAGraph3(graph_, &allocator_, 0, 1, kCondGT);
+ HInstruction* bounds_check = BuildSSAGraph3(graph_, GetAllocator(), 0, 1, kCondGT);
RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
}
@@ -677,7 +676,7 @@
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3d) {
// int[] array = new int[10];
// for (int i=1; i<10; i+=8) { array[i] = 10; // Can eliminate. }
- HInstruction* bounds_check = BuildSSAGraph3(graph_, &allocator_, 1, 8, kCondGE);
+ HInstruction* bounds_check = BuildSSAGraph3(graph_, GetAllocator(), 1, 8, kCondGE);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
@@ -759,21 +758,21 @@
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination4a) {
// for (int i=0; i<array.length; i++) { array[array.length-i-1] = 10; // Can eliminate with gvn. }
- HInstruction* bounds_check = BuildSSAGraph4(graph_, &allocator_, 0);
+ HInstruction* bounds_check = BuildSSAGraph4(graph_, GetAllocator(), 0);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination4b) {
// for (int i=1; i<array.length; i++) { array[array.length-i-1] = 10; // Can eliminate. }
- HInstruction* bounds_check = BuildSSAGraph4(graph_, &allocator_, 1);
+ HInstruction* bounds_check = BuildSSAGraph4(graph_, GetAllocator(), 1);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination4c) {
// for (int i=0; i<=array.length; i++) { array[array.length-i] = 10; // Can't eliminate. }
- HInstruction* bounds_check = BuildSSAGraph4(graph_, &allocator_, 0, kCondGT);
+ HInstruction* bounds_check = BuildSSAGraph4(graph_, GetAllocator(), 0, kCondGT);
RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
}
@@ -790,10 +789,10 @@
// }
// }
TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator_) HParameterValue(
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
entry->AddInstruction(parameter);
@@ -801,23 +800,23 @@
HInstruction* constant_minus_1 = graph_->GetIntConstant(-1);
HInstruction* constant_1 = graph_->GetIntConstant(1);
- HBasicBlock* block = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block);
entry->AddSuccessor(block);
- block->AddInstruction(new (&allocator_) HGoto());
+ block->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(exit);
- exit->AddInstruction(new (&allocator_) HExit());
+ exit->AddInstruction(new (GetAllocator()) HExit());
- HBasicBlock* outer_header = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* outer_header = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(outer_header);
- HPhi* phi_i = new (&allocator_) HPhi(&allocator_, 0, 0, DataType::Type::kInt32);
- HNullCheck* null_check = new (&allocator_) HNullCheck(parameter, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
- HAdd* add = new (&allocator_) HAdd(DataType::Type::kInt32, array_length, constant_minus_1);
- HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(phi_i, add);
- HIf* if_inst = new (&allocator_) HIf(cmp);
+ HPhi* phi_i = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
+ HNullCheck* null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HAdd* add = new (GetAllocator()) HAdd(DataType::Type::kInt32, array_length, constant_minus_1);
+ HInstruction* cmp = new (GetAllocator()) HGreaterThanOrEqual(phi_i, add);
+ HIf* if_inst = new (GetAllocator()) HIf(cmp);
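+ // Outer loop header: exit once i >= array.length - 1; phi_i is fed 0 and i+1 below.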
outer_header->AddPhi(phi_i);
outer_header->AddInstruction(null_check);
outer_header->AddInstruction(array_length);
@@ -826,15 +825,15 @@
outer_header->AddInstruction(if_inst);
phi_i->AddInput(constant_0);
- HBasicBlock* inner_header = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* inner_header = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(inner_header);
- HPhi* phi_j = new (&allocator_) HPhi(&allocator_, 0, 0, DataType::Type::kInt32);
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HSub* sub = new (&allocator_) HSub(DataType::Type::kInt32, array_length, phi_i);
- add = new (&allocator_) HAdd(DataType::Type::kInt32, sub, constant_minus_1);
- cmp = new (&allocator_) HGreaterThanOrEqual(phi_j, add);
- if_inst = new (&allocator_) HIf(cmp);
+ HPhi* phi_j = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HSub* sub = new (GetAllocator()) HSub(DataType::Type::kInt32, array_length, phi_i);
+ add = new (GetAllocator()) HAdd(DataType::Type::kInt32, sub, constant_minus_1);
+ cmp = new (GetAllocator()) HGreaterThanOrEqual(phi_j, add);
+ if_inst = new (GetAllocator()) HIf(cmp);
inner_header->AddPhi(phi_j);
inner_header->AddInstruction(null_check);
inner_header->AddInstruction(array_length);
@@ -844,25 +843,25 @@
inner_header->AddInstruction(if_inst);
phi_j->AddInput(constant_0);
- HBasicBlock* inner_body_compare = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* inner_body_compare = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(inner_body_compare);
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HBoundsCheck* bounds_check1 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0);
- HArrayGet* array_get_j = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HBoundsCheck* bounds_check1 = new (GetAllocator()) HBoundsCheck(phi_j, array_length, 0);
+ HArrayGet* array_get_j = new (GetAllocator())
HArrayGet(null_check, bounds_check1, DataType::Type::kInt32, 0);
inner_body_compare->AddInstruction(null_check);
inner_body_compare->AddInstruction(array_length);
inner_body_compare->AddInstruction(bounds_check1);
inner_body_compare->AddInstruction(array_get_j);
- HInstruction* j_plus_1 = new (&allocator_) HAdd(DataType::Type::kInt32, phi_j, constant_1);
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HBoundsCheck* bounds_check2 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0);
- HArrayGet* array_get_j_plus_1 = new (&allocator_)
+ HInstruction* j_plus_1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi_j, constant_1);
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HBoundsCheck* bounds_check2 = new (GetAllocator()) HBoundsCheck(j_plus_1, array_length, 0);
+ HArrayGet* array_get_j_plus_1 = new (GetAllocator())
HArrayGet(null_check, bounds_check2, DataType::Type::kInt32, 0);
- cmp = new (&allocator_) HGreaterThanOrEqual(array_get_j, array_get_j_plus_1);
- if_inst = new (&allocator_) HIf(cmp);
+ cmp = new (GetAllocator()) HGreaterThanOrEqual(array_get_j, array_get_j_plus_1);
+ if_inst = new (GetAllocator()) HIf(cmp);
inner_body_compare->AddInstruction(j_plus_1);
inner_body_compare->AddInstruction(null_check);
inner_body_compare->AddInstruction(array_length);
@@ -871,14 +870,14 @@
inner_body_compare->AddInstruction(cmp);
inner_body_compare->AddInstruction(if_inst);
- HBasicBlock* inner_body_swap = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* inner_body_swap = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(inner_body_swap);
- j_plus_1 = new (&allocator_) HAdd(DataType::Type::kInt32, phi_j, constant_1);
+ j_plus_1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi_j, constant_1);
// temp = array[j+1]
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HInstruction* bounds_check3 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0);
- array_get_j_plus_1 = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HInstruction* bounds_check3 = new (GetAllocator()) HBoundsCheck(j_plus_1, array_length, 0);
+ array_get_j_plus_1 = new (GetAllocator())
HArrayGet(null_check, bounds_check3, DataType::Type::kInt32, 0);
inner_body_swap->AddInstruction(j_plus_1);
inner_body_swap->AddInstruction(null_check);
@@ -886,48 +885,48 @@
inner_body_swap->AddInstruction(bounds_check3);
inner_body_swap->AddInstruction(array_get_j_plus_1);
// array[j+1] = array[j]
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HInstruction* bounds_check4 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0);
- array_get_j = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HInstruction* bounds_check4 = new (GetAllocator()) HBoundsCheck(phi_j, array_length, 0);
+ array_get_j = new (GetAllocator())
HArrayGet(null_check, bounds_check4, DataType::Type::kInt32, 0);
inner_body_swap->AddInstruction(null_check);
inner_body_swap->AddInstruction(array_length);
inner_body_swap->AddInstruction(bounds_check4);
inner_body_swap->AddInstruction(array_get_j);
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HInstruction* bounds_check5 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0);
- HArraySet* array_set_j_plus_1 = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HInstruction* bounds_check5 = new (GetAllocator()) HBoundsCheck(j_plus_1, array_length, 0);
+ HArraySet* array_set_j_plus_1 = new (GetAllocator())
HArraySet(null_check, bounds_check5, array_get_j, DataType::Type::kInt32, 0);
inner_body_swap->AddInstruction(null_check);
inner_body_swap->AddInstruction(array_length);
inner_body_swap->AddInstruction(bounds_check5);
inner_body_swap->AddInstruction(array_set_j_plus_1);
// array[j] = temp
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HInstruction* bounds_check6 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0);
- HArraySet* array_set_j = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HInstruction* bounds_check6 = new (GetAllocator()) HBoundsCheck(phi_j, array_length, 0);
+ HArraySet* array_set_j = new (GetAllocator())
HArraySet(null_check, bounds_check6, array_get_j_plus_1, DataType::Type::kInt32, 0);
inner_body_swap->AddInstruction(null_check);
inner_body_swap->AddInstruction(array_length);
inner_body_swap->AddInstruction(bounds_check6);
inner_body_swap->AddInstruction(array_set_j);
- inner_body_swap->AddInstruction(new (&allocator_) HGoto());
+ inner_body_swap->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* inner_body_add = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* inner_body_add = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(inner_body_add);
- add = new (&allocator_) HAdd(DataType::Type::kInt32, phi_j, constant_1);
+ add = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi_j, constant_1);
inner_body_add->AddInstruction(add);
- inner_body_add->AddInstruction(new (&allocator_) HGoto());
+ inner_body_add->AddInstruction(new (GetAllocator()) HGoto());
phi_j->AddInput(add);
- HBasicBlock* outer_body_add = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* outer_body_add = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(outer_body_add);
- add = new (&allocator_) HAdd(DataType::Type::kInt32, phi_i, constant_1);
+ add = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi_i, constant_1);
outer_body_add->AddInstruction(add);
- outer_body_add->AddInstruction(new (&allocator_) HGoto());
+ outer_body_add->AddInstruction(new (GetAllocator()) HGoto());
phi_i->AddInput(add);
block->AddSuccessor(outer_header);
@@ -961,10 +960,10 @@
// array[param_i%10] = 10; // Can't eliminate, when param_i < 0
// }
TEST_F(BoundsCheckEliminationTest, ModArrayBoundsElimination) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
- HInstruction* param_i = new (&allocator_)
+ HInstruction* param_i = new (GetAllocator())
HParameterValue(graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
entry->AddInstruction(param_i);
@@ -974,17 +973,17 @@
HInstruction* constant_200 = graph_->GetIntConstant(200);
HInstruction* constant_minus_10 = graph_->GetIntConstant(-10);
- HBasicBlock* block = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block);
entry->AddSuccessor(block);
// We pass a bogus constant for the class to avoid mocking one.
- HInstruction* new_array = new (&allocator_) HNewArray(constant_10, constant_10, 0);
+ HInstruction* new_array = new (GetAllocator()) HNewArray(constant_10, constant_10, 0);
block->AddInstruction(new_array);
- block->AddInstruction(new (&allocator_) HGoto());
+ block->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* loop_header = new (&allocator_) HBasicBlock(graph_);
- HBasicBlock* loop_body = new (&allocator_) HBasicBlock(graph_);
- HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* loop_header = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* loop_body = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(loop_header);
graph_->AddBlock(loop_body);
@@ -994,9 +993,9 @@
loop_header->AddSuccessor(loop_body); // false successor
loop_body->AddSuccessor(loop_header);
- HPhi* phi = new (&allocator_) HPhi(&allocator_, 0, 0, DataType::Type::kInt32);
- HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(phi, constant_200);
- HInstruction* if_inst = new (&allocator_) HIf(cmp);
+ HPhi* phi = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
+ HInstruction* cmp = new (GetAllocator()) HGreaterThanOrEqual(phi, constant_200);
+ HInstruction* if_inst = new (GetAllocator()) HIf(cmp);
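+ // Loop header: run the body while i < 200; i is advanced by the HAdd at the end of the body.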
loop_header->AddPhi(phi);
loop_header->AddInstruction(cmp);
loop_header->AddInstruction(if_inst);
@@ -1005,49 +1004,52 @@
//////////////////////////////////////////////////////////////////////////////////
// LOOP BODY:
// array[i % 10] = 10;
- HRem* i_mod_10 = new (&allocator_) HRem(DataType::Type::kInt32, phi, constant_10, 0);
- HBoundsCheck* bounds_check_i_mod_10 = new (&allocator_) HBoundsCheck(i_mod_10, constant_10, 0);
- HInstruction* array_set = new (&allocator_) HArraySet(
+ HRem* i_mod_10 = new (GetAllocator()) HRem(DataType::Type::kInt32, phi, constant_10, 0);
+ HBoundsCheck* bounds_check_i_mod_10 = new (GetAllocator()) HBoundsCheck(i_mod_10, constant_10, 0);
+ HInstruction* array_set = new (GetAllocator()) HArraySet(
new_array, bounds_check_i_mod_10, constant_10, DataType::Type::kInt32, 0);
loop_body->AddInstruction(i_mod_10);
loop_body->AddInstruction(bounds_check_i_mod_10);
loop_body->AddInstruction(array_set);
// array[i % 1] = 10;
- HRem* i_mod_1 = new (&allocator_) HRem(DataType::Type::kInt32, phi, constant_1, 0);
- HBoundsCheck* bounds_check_i_mod_1 = new (&allocator_) HBoundsCheck(i_mod_1, constant_10, 0);
- array_set = new (&allocator_) HArraySet(
+ HRem* i_mod_1 = new (GetAllocator()) HRem(DataType::Type::kInt32, phi, constant_1, 0);
+ HBoundsCheck* bounds_check_i_mod_1 = new (GetAllocator()) HBoundsCheck(i_mod_1, constant_10, 0);
+ array_set = new (GetAllocator()) HArraySet(
new_array, bounds_check_i_mod_1, constant_10, DataType::Type::kInt32, 0);
loop_body->AddInstruction(i_mod_1);
loop_body->AddInstruction(bounds_check_i_mod_1);
loop_body->AddInstruction(array_set);
// array[i % 200] = 10;
- HRem* i_mod_200 = new (&allocator_) HRem(DataType::Type::kInt32, phi, constant_200, 0);
- HBoundsCheck* bounds_check_i_mod_200 = new (&allocator_) HBoundsCheck(i_mod_200, constant_10, 0);
- array_set = new (&allocator_) HArraySet(
+ HRem* i_mod_200 = new (GetAllocator()) HRem(DataType::Type::kInt32, phi, constant_200, 0);
+ HBoundsCheck* bounds_check_i_mod_200 = new (GetAllocator()) HBoundsCheck(
+ i_mod_200, constant_10, 0);
+ array_set = new (GetAllocator()) HArraySet(
new_array, bounds_check_i_mod_200, constant_10, DataType::Type::kInt32, 0);
loop_body->AddInstruction(i_mod_200);
loop_body->AddInstruction(bounds_check_i_mod_200);
loop_body->AddInstruction(array_set);
// array[i % -10] = 10;
- HRem* i_mod_minus_10 = new (&allocator_) HRem(DataType::Type::kInt32, phi, constant_minus_10, 0);
- HBoundsCheck* bounds_check_i_mod_minus_10 = new (&allocator_) HBoundsCheck(
+ HRem* i_mod_minus_10 = new (GetAllocator()) HRem(
+ DataType::Type::kInt32, phi, constant_minus_10, 0);
+ HBoundsCheck* bounds_check_i_mod_minus_10 = new (GetAllocator()) HBoundsCheck(
i_mod_minus_10, constant_10, 0);
- array_set = new (&allocator_) HArraySet(
+ array_set = new (GetAllocator()) HArraySet(
new_array, bounds_check_i_mod_minus_10, constant_10, DataType::Type::kInt32, 0);
loop_body->AddInstruction(i_mod_minus_10);
loop_body->AddInstruction(bounds_check_i_mod_minus_10);
loop_body->AddInstruction(array_set);
// array[i%array.length] = 10;
- HNullCheck* null_check = new (&allocator_) HNullCheck(new_array, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
- HRem* i_mod_array_length = new (&allocator_) HRem(DataType::Type::kInt32, phi, array_length, 0);
- HBoundsCheck* bounds_check_i_mod_array_len = new (&allocator_) HBoundsCheck(
+ HNullCheck* null_check = new (GetAllocator()) HNullCheck(new_array, 0);
+ HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HRem* i_mod_array_length = new (GetAllocator()) HRem(
+ DataType::Type::kInt32, phi, array_length, 0);
+ HBoundsCheck* bounds_check_i_mod_array_len = new (GetAllocator()) HBoundsCheck(
i_mod_array_length, array_length, 0);
- array_set = new (&allocator_) HArraySet(
+ array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check_i_mod_array_len, constant_10, DataType::Type::kInt32, 0);
loop_body->AddInstruction(null_check);
loop_body->AddInstruction(array_length);
@@ -1056,23 +1058,23 @@
loop_body->AddInstruction(array_set);
// array[param_i % 10] = 10;
- HRem* param_i_mod_10 = new (&allocator_) HRem(DataType::Type::kInt32, param_i, constant_10, 0);
- HBoundsCheck* bounds_check_param_i_mod_10 = new (&allocator_) HBoundsCheck(
+ HRem* param_i_mod_10 = new (GetAllocator()) HRem(DataType::Type::kInt32, param_i, constant_10, 0);
+ HBoundsCheck* bounds_check_param_i_mod_10 = new (GetAllocator()) HBoundsCheck(
param_i_mod_10, constant_10, 0);
- array_set = new (&allocator_) HArraySet(
+ array_set = new (GetAllocator()) HArraySet(
new_array, bounds_check_param_i_mod_10, constant_10, DataType::Type::kInt32, 0);
loop_body->AddInstruction(param_i_mod_10);
loop_body->AddInstruction(bounds_check_param_i_mod_10);
loop_body->AddInstruction(array_set);
// array[param_i%array.length] = 10;
- null_check = new (&allocator_) HNullCheck(new_array, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HRem* param_i_mod_array_length = new (&allocator_) HRem(
+ null_check = new (GetAllocator()) HNullCheck(new_array, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HRem* param_i_mod_array_length = new (GetAllocator()) HRem(
DataType::Type::kInt32, param_i, array_length, 0);
- HBoundsCheck* bounds_check_param_i_mod_array_len = new (&allocator_) HBoundsCheck(
+ HBoundsCheck* bounds_check_param_i_mod_array_len = new (GetAllocator()) HBoundsCheck(
param_i_mod_array_length, array_length, 0);
- array_set = new (&allocator_) HArraySet(
+ array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check_param_i_mod_array_len, constant_10, DataType::Type::kInt32, 0);
loop_body->AddInstruction(null_check);
loop_body->AddInstruction(array_length);
@@ -1081,13 +1083,13 @@
loop_body->AddInstruction(array_set);
// i++;
- HInstruction* add = new (&allocator_) HAdd(DataType::Type::kInt32, phi, constant_1);
+ HInstruction* add = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi, constant_1);
loop_body->AddInstruction(add);
- loop_body->AddInstruction(new (&allocator_) HGoto());
+ loop_body->AddInstruction(new (GetAllocator()) HGoto());
phi->AddInput(add);
//////////////////////////////////////////////////////////////////////////////////
- exit->AddInstruction(new (&allocator_) HExit());
+ exit->AddInstruction(new (GetAllocator()) HExit());
RunBCE();
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 0e708ed..76350a6 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -32,13 +32,12 @@
namespace art {
HGraphBuilder::HGraphBuilder(HGraph* graph,
- DexCompilationUnit* dex_compilation_unit,
- const DexCompilationUnit* const outer_compilation_unit,
+ const DexCompilationUnit* dex_compilation_unit,
+ const DexCompilationUnit* outer_compilation_unit,
CompilerDriver* driver,
CodeGenerator* code_generator,
OptimizingCompilerStats* compiler_stats,
const uint8_t* interpreter_metadata,
- Handle<mirror::DexCache> dex_cache,
VariableSizedHandleScope* handles)
: graph_(graph),
dex_file_(&graph->GetDexFile()),
@@ -63,7 +62,7 @@
code_generator,
interpreter_metadata,
compiler_stats,
- dex_cache,
+ dex_compilation_unit->GetDexCache(),
handles) {}
bool HGraphBuilder::SkipCompilation(size_t number_of_branches) {
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 9524fe2..6c5985a 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -36,13 +36,12 @@
class HGraphBuilder : public ValueObject {
public:
HGraphBuilder(HGraph* graph,
- DexCompilationUnit* dex_compilation_unit,
- const DexCompilationUnit* const outer_compilation_unit,
+ const DexCompilationUnit* dex_compilation_unit,
+ const DexCompilationUnit* outer_compilation_unit,
CompilerDriver* driver,
CodeGenerator* code_generator,
OptimizingCompilerStats* compiler_stats,
const uint8_t* interpreter_metadata,
- Handle<mirror::DexCache> dex_cache,
VariableSizedHandleScope* handles);
// Only for unit testing.
@@ -89,7 +88,7 @@
// The compilation unit of the current method being compiled. Note that
// it can be an inlined method.
- DexCompilationUnit* const dex_compilation_unit_;
+ const DexCompilationUnit* const dex_compilation_unit_;
CompilerDriver* const compiler_driver_;
diff --git a/compiler/optimizing/cha_guard_optimization.cc b/compiler/optimizing/cha_guard_optimization.cc
index c806dbf..3addaee 100644
--- a/compiler/optimizing/cha_guard_optimization.cc
+++ b/compiler/optimizing/cha_guard_optimization.cc
@@ -36,7 +36,7 @@
: HGraphVisitor(graph),
block_has_cha_guard_(GetGraph()->GetBlocks().size(),
0,
- graph->GetArena()->Adapter(kArenaAllocCHA)),
+ graph->GetAllocator()->Adapter(kArenaAllocCHA)),
instruction_iterator_(nullptr) {
number_of_guards_to_visit_ = GetGraph()->GetNumberOfCHAGuards();
DCHECK_NE(number_of_guards_to_visit_, 0u);
@@ -202,8 +202,8 @@
HInstruction* suspend = loop_info->GetSuspendCheck();
// Need a new deoptimize instruction that copies the environment
// of the suspend instruction for the loop.
- HDeoptimize* deoptimize = new (GetGraph()->GetArena()) HDeoptimize(
- GetGraph()->GetArena(), compare, DeoptimizationKind::kCHA, suspend->GetDexPc());
+ HDeoptimize* deoptimize = new (GetGraph()->GetAllocator()) HDeoptimize(
+ GetGraph()->GetAllocator(), compare, DeoptimizationKind::kCHA, suspend->GetDexPc());
pre_header->InsertInstructionBefore(deoptimize, pre_header->GetLastInstruction());
deoptimize->CopyEnvironmentFromWithLoopPhiAdjustment(
suspend->GetEnvironment(), loop_info->GetHeader());
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 3cb3792..dd8e3d2 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -322,7 +322,7 @@
void CodeGenerator::CreateCommonInvokeLocationSummary(
HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor) {
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
LocationSummary* locations = new (allocator) LocationSummary(invoke,
LocationSummary::kCallOnMainOnly);
@@ -420,7 +420,7 @@
bool is_get = field_access->IsUnresolvedInstanceFieldGet()
|| field_access->IsUnresolvedStaticFieldGet();
- ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetAllocator();
LocationSummary* locations =
new (allocator) LocationSummary(field_access, LocationSummary::kCallOnMainOnly);
@@ -541,7 +541,7 @@
Location runtime_return_location) {
DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
DCHECK_EQ(cls->InputCount(), 1u);
- LocationSummary* locations = new (cls->GetBlock()->GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (cls->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
cls, LocationSummary::kCallOnMainOnly);
locations->SetInAt(0, Location::NoLocation());
locations->AddTemp(runtime_type_index_location);
@@ -617,61 +617,49 @@
const InstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
OptimizingCompilerStats* stats) {
- ArenaAllocator* arena = graph->GetArena();
+ ArenaAllocator* allocator = graph->GetAllocator();
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
case kThumb2: {
return std::unique_ptr<CodeGenerator>(
- new (arena) arm::CodeGeneratorARMVIXL(graph,
- *isa_features.AsArmInstructionSetFeatures(),
- compiler_options,
- stats));
+ new (allocator) arm::CodeGeneratorARMVIXL(
+ graph, *isa_features.AsArmInstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64: {
return std::unique_ptr<CodeGenerator>(
- new (arena) arm64::CodeGeneratorARM64(graph,
- *isa_features.AsArm64InstructionSetFeatures(),
- compiler_options,
- stats));
+ new (allocator) arm64::CodeGeneratorARM64(
+ graph, *isa_features.AsArm64InstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case kMips: {
return std::unique_ptr<CodeGenerator>(
- new (arena) mips::CodeGeneratorMIPS(graph,
- *isa_features.AsMipsInstructionSetFeatures(),
- compiler_options,
- stats));
+ new (allocator) mips::CodeGeneratorMIPS(
+ graph, *isa_features.AsMipsInstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64: {
return std::unique_ptr<CodeGenerator>(
- new (arena) mips64::CodeGeneratorMIPS64(graph,
- *isa_features.AsMips64InstructionSetFeatures(),
- compiler_options,
- stats));
+ new (allocator) mips64::CodeGeneratorMIPS64(
+ graph, *isa_features.AsMips64InstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case kX86: {
return std::unique_ptr<CodeGenerator>(
- new (arena) x86::CodeGeneratorX86(graph,
- *isa_features.AsX86InstructionSetFeatures(),
- compiler_options,
- stats));
+ new (allocator) x86::CodeGeneratorX86(
+ graph, *isa_features.AsX86InstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64: {
return std::unique_ptr<CodeGenerator>(
- new (arena) x86_64::CodeGeneratorX86_64(graph,
- *isa_features.AsX86_64InstructionSetFeatures(),
- compiler_options,
- stats));
+ new (allocator) x86_64::CodeGeneratorX86_64(
+ graph, *isa_features.AsX86_64InstructionSetFeatures(), compiler_options, stats));
}
#endif
default:
@@ -712,7 +700,7 @@
// One can write loops through try/catch, which we do not support for OSR anyway.
return;
}
- ArenaVector<HSuspendCheck*> loop_headers(graph.GetArena()->Adapter(kArenaAllocMisc));
+ ArenaVector<HSuspendCheck*> loop_headers(graph.GetAllocator()->Adapter(kArenaAllocMisc));
for (HBasicBlock* block : graph.GetReversePostOrder()) {
if (block->IsLoopHeader()) {
HSuspendCheck* suspend_check = block->GetLoopInformation()->GetSuspendCheck();
@@ -721,7 +709,8 @@
}
}
}
- ArenaVector<size_t> covered(loop_headers.size(), 0, graph.GetArena()->Adapter(kArenaAllocMisc));
+ ArenaVector<size_t> covered(
+ loop_headers.size(), 0, graph.GetAllocator()->Adapter(kArenaAllocMisc));
IterationRange<DexInstructionIterator> instructions = code_item.Instructions();
for (auto it = instructions.begin(); it != instructions.end(); ++it) {
const uint32_t dex_pc = it.GetDexPC(instructions.begin());
@@ -909,7 +898,7 @@
}
void CodeGenerator::RecordCatchBlockInfo() {
- ArenaAllocator* arena = graph_->GetArena();
+ ArenaAllocator* allocator = graph_->GetAllocator();
for (HBasicBlock* block : *block_order_) {
if (!block->IsCatchBlock()) {
@@ -924,7 +913,7 @@
// The stack mask is not used, so we leave it empty.
ArenaBitVector* stack_mask =
- ArenaBitVector::Create(arena, 0, /* expandable */ true, kArenaAllocCodeGenerator);
+ ArenaBitVector::Create(allocator, 0, /* expandable */ true, kArenaAllocCodeGenerator);
stack_map_stream_.BeginStackMapEntry(dex_pc,
native_pc,
@@ -1194,7 +1183,8 @@
if (can_throw_into_catch_block) {
call_kind = LocationSummary::kCallOnSlowPath;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
if (can_throw_into_catch_block && compiler_options_.GetImplicitNullChecks()) {
locations->SetCustomSlowPathCallerSaves(caller_saves); // Default: no caller-save registers.
}
@@ -1237,7 +1227,7 @@
Location from2,
Location to2,
DataType::Type type2) {
- HParallelMove parallel_move(GetGraph()->GetArena());
+ HParallelMove parallel_move(GetGraph()->GetAllocator());
parallel_move.AddMove(from1, to1, type1, nullptr);
parallel_move.AddMove(from2, to2, type2, nullptr);
GetMoveResolver()->EmitNativeCode(&parallel_move);
@@ -1400,7 +1390,7 @@
return;
}
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
LocationSummary* locations = new (allocator) LocationSummary(invoke,
LocationSummary::kCallOnSlowPath,
kIntrinsified);
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index ac3c839..2c3cf26 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -605,26 +605,26 @@
fpu_spill_mask_(0),
first_register_slot_in_slow_path_(0),
allocated_registers_(RegisterSet::Empty()),
- blocked_core_registers_(graph->GetArena()->AllocArray<bool>(number_of_core_registers,
- kArenaAllocCodeGenerator)),
- blocked_fpu_registers_(graph->GetArena()->AllocArray<bool>(number_of_fpu_registers,
- kArenaAllocCodeGenerator)),
+ blocked_core_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_core_registers,
+ kArenaAllocCodeGenerator)),
+ blocked_fpu_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_fpu_registers,
+ kArenaAllocCodeGenerator)),
number_of_core_registers_(number_of_core_registers),
number_of_fpu_registers_(number_of_fpu_registers),
number_of_register_pairs_(number_of_register_pairs),
core_callee_save_mask_(core_callee_save_mask),
fpu_callee_save_mask_(fpu_callee_save_mask),
- stack_map_stream_(graph->GetArena(), graph->GetInstructionSet()),
+ stack_map_stream_(graph->GetAllocator(), graph->GetInstructionSet()),
block_order_(nullptr),
jit_string_roots_(StringReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_class_roots_(TypeReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
disasm_info_(nullptr),
stats_(stats),
graph_(graph),
compiler_options_(compiler_options),
- slow_paths_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ slow_paths_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
current_slow_path_(nullptr),
current_block_index_(0),
is_leaf_(true),
@@ -668,8 +668,8 @@
// We use raw array allocations instead of ArenaVector<> because Labels are
// non-constructible and non-movable and as such cannot be held in a vector.
size_t size = GetGraph()->GetBlocks().size();
- LabelType* labels = GetGraph()->GetArena()->AllocArray<LabelType>(size,
- kArenaAllocCodeGenerator);
+ LabelType* labels =
+ GetGraph()->GetAllocator()->AllocArray<LabelType>(size, kArenaAllocCodeGenerator);
for (size_t i = 0; i != size; ++i) {
new(labels + i) LabelType();
}
@@ -823,7 +823,8 @@
SlowPathGenerator(HGraph* graph, CodeGenerator* codegen)
: graph_(graph),
codegen_(codegen),
- slow_path_map_(std::less<uint32_t>(), graph->GetArena()->Adapter(kArenaAllocSlowPaths)) {}
+ slow_path_map_(std::less<uint32_t>(),
+ graph->GetAllocator()->Adapter(kArenaAllocSlowPaths)) {}
// Creates and adds a new slow-path, if needed, or returns existing one otherwise.
// Templating the method (rather than the whole class) on the slow-path type enables
@@ -857,10 +858,11 @@
}
} else {
// First time this dex-pc is seen.
- iter = slow_path_map_.Put(dex_pc, {{}, {graph_->GetArena()->Adapter(kArenaAllocSlowPaths)}});
+ iter = slow_path_map_.Put(dex_pc,
+ {{}, {graph_->GetAllocator()->Adapter(kArenaAllocSlowPaths)}});
}
// Cannot share: create and add new slow-path for this particular dex-pc.
- SlowPathCodeType* slow_path = new (graph_->GetArena()) SlowPathCodeType(instruction);
+ SlowPathCodeType* slow_path = new (graph_->GetAllocator()) SlowPathCodeType(instruction);
iter->second.emplace_back(std::make_pair(instruction, slow_path));
codegen_->AddSlowPath(slow_path);
return slow_path;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 468e93a..9be9117 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -620,7 +620,7 @@
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(
locations->InAt(0),
LocationFrom(calling_convention.GetRegisterAt(0)),
@@ -1294,7 +1294,7 @@
// We're moving two or three locations to locations that could
// overlap, so we need a parallel move resolver.
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(ref_,
LocationFrom(calling_convention.GetRegisterAt(0)),
type,
@@ -1453,28 +1453,28 @@
callee_saved_fp_registers.GetList(),
compiler_options,
stats),
- block_labels_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ block_labels_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jump_tables_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
location_builder_(graph, this),
instruction_visitor_(graph, this),
- move_resolver_(graph->GetArena(), this),
- assembler_(graph->GetArena()),
+ move_resolver_(graph->GetAllocator(), this),
+ assembler_(graph->GetAllocator()),
isa_features_(isa_features),
uint32_literals_(std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
uint64_literals_(std::less<uint64_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- baker_read_barrier_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ baker_read_barrier_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(TypeReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
// Save the link register (containing the return address) to mimic Quick.
AddAllocatedRegister(LocationFrom(lr));
}
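
The constructor above repeats one pattern many times: every container is backed by the graph's ArenaAllocator via Adapter(kArenaAllocCodeGenerator), which both allocates from the arena and tags the memory for ART's allocation accounting. A minimal, hedged sketch of the same pattern in isolation (the local vector and reserve call are illustrative, not from this change):

// An arena-backed vector: storage comes from the graph's arena and is
// attributed to the code generator subsystem in allocation statistics.
ArenaVector<vixl::aarch64::Label> labels(
    graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator));
labels.reserve(graph->GetBlocks().size());  // grows inside the arena; freed wholesale with the graph
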
@@ -2204,7 +2204,7 @@
SuspendCheckSlowPathARM64* slow_path =
down_cast<SuspendCheckSlowPathARM64*>(instruction->GetSlowPath());
if (slow_path == nullptr) {
- slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, successor);
+ slow_path = new (GetGraph()->GetAllocator()) SuspendCheckSlowPathARM64(instruction, successor);
instruction->SetSlowPath(slow_path);
codegen_->AddSlowPath(slow_path);
if (successor != nullptr) {
@@ -2235,36 +2235,9 @@
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
-#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \
- /* No unimplemented IR. */
-
-#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode
-
-enum UnimplementedInstructionBreakCode {
- // Using a base helps identify when we hit such breakpoints.
- UnimplementedInstructionBreakCodeBaseCode = 0x900,
-#define ENUM_UNIMPLEMENTED_INSTRUCTION(name) UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name),
- FOR_EACH_UNIMPLEMENTED_INSTRUCTION(ENUM_UNIMPLEMENTED_INSTRUCTION)
-#undef ENUM_UNIMPLEMENTED_INSTRUCTION
-};
-
-#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name) \
- void InstructionCodeGeneratorARM64::Visit##name(H##name* instr ATTRIBUTE_UNUSED) { \
- __ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name)); \
- } \
- void LocationsBuilderARM64::Visit##name(H##name* instr) { \
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); \
- locations->SetOut(Location::Any()); \
- }
- FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS)
-#undef DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS
-
-#undef UNIMPLEMENTED_INSTRUCTION_BREAK_CODE
-#undef FOR_EACH_UNIMPLEMENTED_INSTRUCTION
-
void LocationsBuilderARM64::HandleBinaryOp(HBinaryOperation* instr) {
DCHECK_EQ(instr->InputCount(), 2U);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr);
DataType::Type type = instr->GetResultType();
switch (type) {
case DataType::Type::kInt32:
@@ -2293,10 +2266,10 @@
bool object_field_get_with_read_barrier =
kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_field_get_with_read_barrier ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_field_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
// We need a temporary register for the read barrier marking slow
@@ -2378,7 +2351,7 @@
void LocationsBuilderARM64::HandleFieldSet(HInstruction* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
if (IsConstantZeroBitPattern(instruction->InputAt(1))) {
locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant()));
@@ -2485,7 +2458,7 @@
void LocationsBuilderARM64::HandleShift(HBinaryOperation* instr) {
DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr);
DataType::Type type = instr->GetResultType();
switch (type) {
case DataType::Type::kInt32:
@@ -2556,7 +2529,7 @@
void LocationsBuilderARM64::VisitBitwiseNegatedRight(HBitwiseNegatedRight* instr) {
DCHECK(DataType::IsIntegralType(instr->GetType())) << instr->GetType();
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr);
locations->SetInAt(0, Location::RequiresRegister());
// There is no immediate variant of negated bitwise instructions in AArch64.
locations->SetInAt(1, Location::RequiresRegister());
@@ -2588,7 +2561,7 @@
DCHECK(instruction->GetType() == DataType::Type::kInt32 ||
instruction->GetType() == DataType::Type::kInt64);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
if (instruction->GetInstrKind() == HInstruction::kNeg) {
locations->SetInAt(0, Location::ConstantLocation(instruction->InputAt(0)->AsConstant()));
} else {
@@ -2659,7 +2632,7 @@
void LocationsBuilderARM64::VisitIntermediateAddress(HIntermediateAddress* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->GetOffset(), instruction));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -2673,7 +2646,7 @@
void LocationsBuilderARM64::VisitIntermediateAddressIndex(HIntermediateAddressIndex* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
HIntConstant* shift = instruction->GetShift()->AsIntConstant();
@@ -2705,7 +2678,7 @@
void LocationsBuilderARM64::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instr, LocationSummary::kNoCall);
HInstruction* accumulator = instr->InputAt(HMultiplyAccumulate::kInputAccumulatorIndex);
if (instr->GetOpKind() == HInstruction::kSub &&
accumulator->IsConstant() &&
@@ -2759,10 +2732,10 @@
bool object_array_get_with_read_barrier =
kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_array_get_with_read_barrier ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_array_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
// We need a temporary register for the read barrier marking slow
@@ -2929,7 +2902,7 @@
}
void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -2953,7 +2926,7 @@
DataType::Type value_type = instruction->GetComponentType();
bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
may_need_runtime_call_for_type_check ?
LocationSummary::kCallOnSlowPath :
@@ -3039,7 +3012,7 @@
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathARM64(instruction);
+ slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathARM64(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
vixl::aarch64::Label non_zero;
@@ -3154,7 +3127,7 @@
void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
BoundsCheckSlowPathARM64* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(instruction);
+ new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARM64(instruction);
codegen_->AddSlowPath(slow_path);
__ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
__ B(slow_path->GetEntryLabel(), hs);
@@ -3162,7 +3135,7 @@
void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
locations->SetInAt(0, Location::RequiresRegister());
if (check->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -3171,7 +3144,7 @@
void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
+ SlowPathCodeARM64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathARM64(
check->GetLoadClass(), check, check->GetDexPc(), true);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
@@ -3210,7 +3183,7 @@
void LocationsBuilderARM64::VisitCompare(HCompare* compare) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(compare, LocationSummary::kNoCall);
DataType::Type in_type = compare->InputAt(0)->GetType();
switch (in_type) {
case DataType::Type::kBool:
@@ -3276,7 +3249,7 @@
}
void LocationsBuilderARM64::HandleCondition(HCondition* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
if (DataType::IsFloatingPointType(instruction->InputAt(0)->GetType())) {
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -3482,7 +3455,7 @@
void LocationsBuilderARM64::VisitDiv(HDiv* div) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(div, LocationSummary::kNoCall);
switch (div->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -3528,7 +3501,7 @@
void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM64(instruction);
+ new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathARM64(instruction);
codegen_->AddSlowPath(slow_path);
Location value = instruction->GetLocations()->InAt(0);
@@ -3554,7 +3527,7 @@
void LocationsBuilderARM64::VisitDoubleConstant(HDoubleConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3572,7 +3545,7 @@
void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3726,7 +3699,7 @@
}
void LocationsBuilderARM64::VisitIf(HIf* if_instr) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr);
if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -3747,7 +3720,7 @@
}
void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
InvokeRuntimeCallingConvention calling_convention;
RegisterSet caller_saves = RegisterSet::Empty();
@@ -3768,7 +3741,7 @@
}
void LocationsBuilderARM64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(flag, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -3790,7 +3763,7 @@
}
void LocationsBuilderARM64::VisitSelect(HSelect* select) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select);
if (DataType::IsFloatingPointType(select->GetType())) {
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
@@ -3859,7 +3832,7 @@
}
void LocationsBuilderARM64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
- new (GetGraph()->GetArena()) LocationSummary(info);
+ new (GetGraph()->GetAllocator()) LocationSummary(info);
}
void InstructionCodeGeneratorARM64::VisitNativeDebugInfo(HNativeDebugInfo*) {
@@ -3928,7 +3901,8 @@
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -4083,8 +4057,8 @@
kWithoutReadBarrier);
__ Cmp(out, cls);
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARM64(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(ne, slow_path->GetEntryLabel());
__ Mov(out, 1);
@@ -4115,8 +4089,8 @@
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARM64(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
@@ -4161,7 +4135,8 @@
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
// Add temps for read barriers and other uses. One is used by TypeCheckSlowPathARM64.
@@ -4203,8 +4178,8 @@
!instruction->CanThrowIntoCatchBlock();
}
SlowPathCodeARM64* type_check_slow_path =
- new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction,
- is_type_check_slow_path_fatal);
+ new (GetGraph()->GetAllocator()) TypeCheckSlowPathARM64(instruction,
+ is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
vixl::aarch64::Label done;
@@ -4372,7 +4347,7 @@
}
void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -4381,7 +4356,7 @@
}
void LocationsBuilderARM64::VisitNullConstant(HNullConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -4472,7 +4447,7 @@
}
void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
- IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena(), codegen_);
+ IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetAllocator(), codegen_);
if (intrinsic.TryDispatch(invoke)) {
return;
}
@@ -4485,7 +4460,7 @@
// art::PrepareForRegisterAllocation.
DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
- IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena(), codegen_);
+ IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetAllocator(), codegen_);
if (intrinsic.TryDispatch(invoke)) {
return;
}
@@ -4896,7 +4871,7 @@
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -5037,7 +5012,7 @@
bool do_clinit = cls->MustGenerateClinitCheck();
if (generate_null_check || do_clinit) {
DCHECK(cls->CanCallRuntime());
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
+ SlowPathCodeARM64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathARM64(
cls, cls, cls->GetDexPc(), do_clinit, bss_entry_temp, bss_entry_adrp_label);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
@@ -5058,7 +5033,7 @@
void LocationsBuilderARM64::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -5067,7 +5042,7 @@
}
void LocationsBuilderARM64::VisitClearException(HClearException* clear) {
- new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall);
}
void InstructionCodeGeneratorARM64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
@@ -5094,7 +5069,7 @@
void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
if (load->GetLoadKind() == HLoadString::LoadKind::kRuntimeCall) {
InvokeRuntimeCallingConvention calling_convention;
locations->SetOut(calling_convention.GetReturnLocation(load->GetType()));
@@ -5177,7 +5152,7 @@
ldr_label,
kCompilerReadBarrierOption);
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load, temp, adrp_label);
+ new (GetGraph()->GetAllocator()) LoadStringSlowPathARM64(load, temp, adrp_label);
codegen_->AddSlowPath(slow_path);
__ Cbz(out.X(), slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -5210,7 +5185,7 @@
}
void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -5219,8 +5194,8 @@
}
void LocationsBuilderARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
}
@@ -5239,7 +5214,7 @@
void LocationsBuilderARM64::VisitMul(HMul* mul) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall);
switch (mul->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -5279,7 +5254,7 @@
void LocationsBuilderARM64::VisitNeg(HNeg* neg) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
switch (neg->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -5316,8 +5291,8 @@
}
void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetOut(LocationFrom(x0));
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
@@ -5335,8 +5310,8 @@
}
void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
if (instruction->IsStringAlloc()) {
locations->AddTemp(LocationFrom(kArtMethodRegister));
@@ -5372,7 +5347,7 @@
}
void LocationsBuilderARM64::VisitNot(HNot* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -5390,7 +5365,7 @@
}
void LocationsBuilderARM64::VisitBooleanNot(HBooleanNot* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -5418,7 +5393,8 @@
}
void CodeGeneratorARM64::GenerateExplicitNullCheck(HNullCheck* instruction) {
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM64(instruction);
+ SlowPathCodeARM64* slow_path =
+ new (GetGraph()->GetAllocator()) NullCheckSlowPathARM64(instruction);
AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -5448,7 +5424,7 @@
}
void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
if (location.IsStackSlot()) {
location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
@@ -5465,7 +5441,7 @@
void LocationsBuilderARM64::VisitCurrentMethod(HCurrentMethod* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetOut(LocationFrom(kArtMethodRegister));
}
@@ -5475,7 +5451,7 @@
}
void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
locations->SetInAt(i, Location::Any());
}
@@ -5491,7 +5467,7 @@
LocationSummary::CallKind call_kind =
DataType::IsFloatingPointType(type) ? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(rem, call_kind);
switch (type) {
case DataType::Type::kInt32:
@@ -5563,7 +5539,7 @@
}
void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
DataType::Type return_type = instruction->InputAt(0)->GetType();
locations->SetInAt(0, ARM64ReturnLocation(return_type));
}
@@ -5697,8 +5673,8 @@
}
void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
// In suspend check slow path, usually there are no caller-save registers at all.
// If SIMD instructions are present, however, we force spilling all live SIMD
// registers in full width (since the runtime only saves/restores lower part).
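
A hedged illustration of the comment above; RegisterSet::AllFpu() and HasSIMD() are assumed names for the full-FP register set and the SIMD query, so treat this as a sketch of the intent rather than the exact code:

// With SIMD live, widen the custom caller-save set to every FP register so
// the suspend-check slow path spills them in full width; otherwise save none.
locations->SetCustomSlowPathCallerSaves(
    GetGraph()->HasSIMD() ? RegisterSet::AllFpu() : RegisterSet::Empty());
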
@@ -5722,8 +5698,8 @@
}
void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
}
@@ -5735,7 +5711,7 @@
void LocationsBuilderARM64::VisitTypeConversion(HTypeConversion* conversion) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(conversion, LocationSummary::kNoCall);
DataType::Type input_type = conversion->GetInputType();
DataType::Type result_type = conversion->GetResultType();
DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type))
@@ -5829,7 +5805,7 @@
// Simple implementation of packed switch - generate cascaded compare/jumps.
void LocationsBuilderARM64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -6053,7 +6029,7 @@
// Slow path marking the GC root `root`. The entrypoint will
// be loaded by the slow path code.
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM64(instruction, root);
+ new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathARM64(instruction, root);
codegen_->AddSlowPath(slow_path);
// /* GcRoot<mirror::Object> */ root = *(obj + offset)
@@ -6312,7 +6288,7 @@
// Slow path marking the object `ref` when the GC is marking. The
// entrypoint will be loaded by the slow path code.
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetArena()) LoadReferenceWithBakerReadBarrierSlowPathARM64(
+ new (GetGraph()->GetAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARM64(
instruction,
ref,
obj,
@@ -6370,7 +6346,7 @@
// Slow path updating the object reference at address `obj + field_offset`
// when the GC is marking. The entrypoint will be loaded by the slow path code.
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetArena()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64(
+ new (GetGraph()->GetAllocator()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64(
instruction,
ref,
obj,
@@ -6497,7 +6473,7 @@
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena())
+ SlowPathCodeARM64* slow_path = new (GetGraph()->GetAllocator())
ReadBarrierForHeapReferenceSlowPathARM64(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -6533,7 +6509,7 @@
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathARM64(instruction, out, root);
+ new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathARM64(instruction, out, root);
AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
@@ -6542,7 +6518,7 @@
void LocationsBuilderARM64::VisitClassTableGet(HClassTableGet* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 21da955..e53773c 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -489,7 +489,7 @@
uint32_t GetPreferredSlotsAlignment() const OVERRIDE { return vixl::aarch64::kXRegSizeInBytes; }
JumpTableARM64* CreateJumpTable(HPackedSwitch* switch_instr) {
- jump_tables_.emplace_back(new (GetGraph()->GetArena()) JumpTableARM64(switch_instr));
+ jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARM64(switch_instr));
return jump_tables_.back().get();
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index d4fb064..d7137a3 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -763,7 +763,7 @@
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(
locations->InAt(0),
LocationFrom(calling_convention.GetRegisterAt(0)),
@@ -1414,7 +1414,7 @@
// We're moving two or three locations to locations that could
// overlap, so we need a parallel move resolver.
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(ref_,
LocationFrom(calling_convention.GetRegisterAt(0)),
DataType::Type::kReference,
@@ -2421,26 +2421,26 @@
ComputeSRegisterListMask(kFpuCalleeSaves),
compiler_options,
stats),
- block_labels_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ block_labels_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jump_tables_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
location_builder_(graph, this),
instruction_visitor_(graph, this),
- move_resolver_(graph->GetArena(), this),
- assembler_(graph->GetArena()),
+ move_resolver_(graph->GetAllocator(), this),
+ assembler_(graph->GetAllocator()),
isa_features_(isa_features),
uint32_literals_(std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- baker_read_barrier_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ baker_read_barrier_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(TypeReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
// Always save the LR register to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(LR));
// Give D30 and D31 as scratch register to VIXL. The register allocator only works on
@@ -2810,7 +2810,7 @@
void CodeGeneratorARMVIXL::MoveLocation(Location dst, Location src, DataType::Type dst_type) {
// TODO(VIXL): Maybe refactor to have the 'move' implementation here and use it in
// `ParallelMoveResolverARMVIXL::EmitMove`, as is done in the `arm64` backend.
- HParallelMove move(GetGraph()->GetArena());
+ HParallelMove move(GetGraph()->GetAllocator());
move.AddMove(src, dst, dst_type, nullptr);
GetMoveResolver()->EmitNativeCode(&move);
}
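
Even a single move is routed through the parallel move resolver here, which lets it pick scratch registers when source and destination overlap. A hedged usage sketch of the function above; the concrete locations are illustrative only:

// Move a core register into a stack slot via the resolver path shown above.
codegen->MoveLocation(Location::StackSlot(/* stack_index */ 16),
                      Location::RegisterLocation(/* reg_id */ 0),
                      DataType::Type::kInt32);
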
@@ -3030,7 +3030,7 @@
}
void LocationsBuilderARMVIXL::VisitIf(HIf* if_instr) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr);
if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -3047,7 +3047,7 @@
}
void LocationsBuilderARMVIXL::VisitDeoptimize(HDeoptimize* deoptimize) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
RegisterSet caller_saves = RegisterSet::Empty();
@@ -3068,7 +3068,7 @@
}
void LocationsBuilderARMVIXL::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(flag, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -3081,7 +3081,7 @@
}
void LocationsBuilderARMVIXL::VisitSelect(HSelect* select) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select);
const bool is_floating_point = DataType::IsFloatingPointType(select->GetType());
if (is_floating_point) {
@@ -3222,7 +3222,7 @@
}
void LocationsBuilderARMVIXL::VisitNativeDebugInfo(HNativeDebugInfo* info) {
- new (GetGraph()->GetArena()) LocationSummary(info);
+ new (GetGraph()->GetAllocator()) LocationSummary(info);
}
void InstructionCodeGeneratorARMVIXL::VisitNativeDebugInfo(HNativeDebugInfo*) {
@@ -3312,7 +3312,7 @@
void LocationsBuilderARMVIXL::HandleCondition(HCondition* cond) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(cond, LocationSummary::kNoCall);
// Handle the long/FP comparisons made in instruction simplification.
switch (cond->InputAt(0)->GetType()) {
case DataType::Type::kInt64:
@@ -3471,7 +3471,7 @@
void LocationsBuilderARMVIXL::VisitIntConstant(HIntConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3481,7 +3481,7 @@
void LocationsBuilderARMVIXL::VisitNullConstant(HNullConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3491,7 +3491,7 @@
void LocationsBuilderARMVIXL::VisitLongConstant(HLongConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3501,7 +3501,7 @@
void LocationsBuilderARMVIXL::VisitFloatConstant(HFloatConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3512,7 +3512,7 @@
void LocationsBuilderARMVIXL::VisitDoubleConstant(HDoubleConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3548,7 +3548,7 @@
void LocationsBuilderARMVIXL::VisitReturn(HReturn* ret) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(ret, LocationSummary::kNoCall);
locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
}
@@ -3722,7 +3722,7 @@
void LocationsBuilderARMVIXL::VisitNeg(HNeg* neg) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
switch (neg->GetResultType()) {
case DataType::Type::kInt32: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -3794,7 +3794,7 @@
? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
+ new (GetGraph()->GetAllocator()) LocationSummary(conversion, call_kind);
switch (result_type) {
case DataType::Type::kUint8:
@@ -4158,7 +4158,7 @@
void LocationsBuilderARMVIXL::VisitAdd(HAdd* add) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(add, LocationSummary::kNoCall);
switch (add->GetResultType()) {
case DataType::Type::kInt32: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -4223,7 +4223,7 @@
void LocationsBuilderARMVIXL::VisitSub(HSub* sub) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(sub, LocationSummary::kNoCall);
switch (sub->GetResultType()) {
case DataType::Type::kInt32: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -4285,7 +4285,7 @@
void LocationsBuilderARMVIXL::VisitMul(HMul* mul) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall);
switch (mul->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64: {
@@ -4494,7 +4494,7 @@
call_kind = LocationSummary::kCallOnMainOnly;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(div, call_kind);
switch (div->GetResultType()) {
case DataType::Type::kInt32: {
@@ -4607,7 +4607,7 @@
call_kind = LocationSummary::kNoCall;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(rem, call_kind);
switch (type) {
case DataType::Type::kInt32: {
@@ -4734,7 +4734,7 @@
void InstructionCodeGeneratorARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) {
DivZeroCheckSlowPathARMVIXL* slow_path =
- new (GetGraph()->GetArena()) DivZeroCheckSlowPathARMVIXL(instruction);
+ new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathARMVIXL(instruction);
codegen_->AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -4872,7 +4872,7 @@
void LocationsBuilderARMVIXL::VisitRor(HRor* ror) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(ror, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(ror, LocationSummary::kNoCall);
switch (ror->GetResultType()) {
case DataType::Type::kInt32: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -4918,7 +4918,7 @@
DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(op, LocationSummary::kNoCall);
switch (op->GetResultType()) {
case DataType::Type::kInt32: {
@@ -5148,8 +5148,8 @@
}
void LocationsBuilderARMVIXL::VisitNewInstance(HNewInstance* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
if (instruction->IsStringAlloc()) {
locations->AddTemp(LocationFrom(kMethodRegister));
} else {
@@ -5182,8 +5182,8 @@
}
void LocationsBuilderARMVIXL::VisitNewArray(HNewArray* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetOut(LocationFrom(r0));
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
@@ -5203,7 +5203,7 @@
void LocationsBuilderARMVIXL::VisitParameterValue(HParameterValue* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
if (location.IsStackSlot()) {
location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
@@ -5220,7 +5220,7 @@
void LocationsBuilderARMVIXL::VisitCurrentMethod(HCurrentMethod* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetOut(LocationFrom(kMethodRegister));
}
@@ -5231,7 +5231,7 @@
void LocationsBuilderARMVIXL::VisitNot(HNot* not_) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(not_, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -5257,7 +5257,7 @@
void LocationsBuilderARMVIXL::VisitBooleanNot(HBooleanNot* bool_not) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(bool_not, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -5268,7 +5268,7 @@
void LocationsBuilderARMVIXL::VisitCompare(HCompare* compare) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(compare, LocationSummary::kNoCall);
switch (compare->InputAt(0)->GetType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -5359,7 +5359,7 @@
void LocationsBuilderARMVIXL::VisitPhi(HPhi* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
locations->SetInAt(i, Location::Any());
}
@@ -5437,7 +5437,7 @@
DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
DataType::Type field_type = field_info.GetFieldType();
@@ -5600,10 +5600,10 @@
bool object_field_get_with_read_barrier =
kEmitCompilerReadBarrier && (field_info.GetFieldType() == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_field_get_with_read_barrier ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_field_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -5960,7 +5960,7 @@
void CodeGeneratorARMVIXL::GenerateExplicitNullCheck(HNullCheck* instruction) {
NullCheckSlowPathARMVIXL* slow_path =
- new (GetGraph()->GetArena()) NullCheckSlowPathARMVIXL(instruction);
+ new (GetGraph()->GetAllocator()) NullCheckSlowPathARMVIXL(instruction);
AddSlowPath(slow_path);
__ CompareAndBranchIfZero(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
}
@@ -6041,10 +6041,10 @@
bool object_array_get_with_read_barrier =
kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_array_get_with_read_barrier ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_array_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -6325,7 +6325,7 @@
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
may_need_runtime_call_for_type_check ?
LocationSummary::kCallOnSlowPath :
@@ -6433,7 +6433,7 @@
SlowPathCodeARMVIXL* slow_path = nullptr;
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathARMVIXL(instruction);
+ slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathARMVIXL(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
vixl32::Label non_zero;
@@ -6607,7 +6607,7 @@
void LocationsBuilderARMVIXL::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -6631,7 +6631,7 @@
void LocationsBuilderARMVIXL::VisitIntermediateAddress(HIntermediateAddress* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->GetOffset()));
@@ -6694,7 +6694,7 @@
int32_t index = Int32ConstantFrom(index_loc);
if (index < 0 || index >= length) {
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathARMVIXL(instruction);
+ new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
} else {
@@ -6705,13 +6705,13 @@
}
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathARMVIXL(instruction);
+ new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
__ Cmp(RegisterFrom(index_loc), length);
codegen_->AddSlowPath(slow_path);
__ B(hs, slow_path->GetEntryLabel());
} else {
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathARMVIXL(instruction);
+ new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
__ Cmp(RegisterFrom(length_loc), InputOperandAt(instruction, 0));
codegen_->AddSlowPath(slow_path);
__ B(ls, slow_path->GetEntryLabel());
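
The register-indexed paths above branch on unsigned conditions (hs/ls), which folds the negative-index test into a single compare. An equivalent check in plain C++, shown for illustration:

// Casting to unsigned makes a negative index wrap to a huge value, so one
// unsigned >= covers both "index < 0" and "index >= length".
bool IndexOutOfBounds(int32_t index, int32_t length) {
  return static_cast<uint32_t>(index) >= static_cast<uint32_t>(length);
}
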
@@ -6745,8 +6745,8 @@
}
void LocationsBuilderARMVIXL::VisitSuspendCheck(HSuspendCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -6770,7 +6770,8 @@
SuspendCheckSlowPathARMVIXL* slow_path =
down_cast<SuspendCheckSlowPathARMVIXL*>(instruction->GetSlowPath());
if (slow_path == nullptr) {
- slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARMVIXL(instruction, successor);
+ slow_path =
+ new (GetGraph()->GetAllocator()) SuspendCheckSlowPathARMVIXL(instruction, successor);
instruction->SetSlowPath(slow_path);
codegen_->AddSlowPath(slow_path);
if (successor != nullptr) {
@@ -7085,7 +7086,7 @@
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -7208,7 +7209,7 @@
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- LoadClassSlowPathARMVIXL* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARMVIXL(
+ LoadClassSlowPathARMVIXL* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathARMVIXL(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
@@ -7225,7 +7226,7 @@
void LocationsBuilderARMVIXL::VisitClinitCheck(HClinitCheck* check) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
locations->SetInAt(0, Location::RequiresRegister());
if (check->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -7235,10 +7236,10 @@
void InstructionCodeGeneratorARMVIXL::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
LoadClassSlowPathARMVIXL* slow_path =
- new (GetGraph()->GetArena()) LoadClassSlowPathARMVIXL(check->GetLoadClass(),
- check,
- check->GetDexPc(),
- /* do_clinit */ true);
+ new (GetGraph()->GetAllocator()) LoadClassSlowPathARMVIXL(check->GetLoadClass(),
+ check,
+ check->GetDexPc(),
+ /* do_clinit */ true);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
}
@@ -7279,7 +7280,7 @@
void LocationsBuilderARMVIXL::VisitLoadString(HLoadString* load) {
LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
HLoadString::LoadKind load_kind = load->GetLoadKind();
if (load_kind == HLoadString::LoadKind::kRuntimeCall) {
locations->SetOut(LocationFrom(r0));
@@ -7348,7 +7349,7 @@
codegen_->EmitMovwMovtPlaceholder(labels, temp);
GenerateGcRootFieldLoad(load, out_loc, temp, /* offset */ 0, kCompilerReadBarrierOption);
LoadStringSlowPathARMVIXL* slow_path =
- new (GetGraph()->GetArena()) LoadStringSlowPathARMVIXL(load);
+ new (GetGraph()->GetAllocator()) LoadStringSlowPathARMVIXL(load);
codegen_->AddSlowPath(slow_path);
__ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -7382,7 +7383,7 @@
void LocationsBuilderARMVIXL::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -7393,7 +7394,7 @@
void LocationsBuilderARMVIXL::VisitClearException(HClearException* clear) {
- new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall);
}
void InstructionCodeGeneratorARMVIXL::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
@@ -7404,8 +7405,8 @@
}
void LocationsBuilderARMVIXL::VisitThrow(HThrow* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
}
@@ -7457,7 +7458,8 @@
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -7674,8 +7676,8 @@
kWithoutReadBarrier);
__ Cmp(out, cls);
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARMVIXL(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARMVIXL(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(ne, slow_path->GetEntryLabel());
__ Mov(out, 1);
@@ -7703,8 +7705,8 @@
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARMVIXL(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARMVIXL(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
break;
@@ -7741,7 +7743,8 @@
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
@@ -7781,8 +7784,8 @@
!instruction->CanThrowIntoCatchBlock();
}
SlowPathCodeARMVIXL* type_check_slow_path =
- new (GetGraph()->GetArena()) TypeCheckSlowPathARMVIXL(instruction,
- is_type_check_slow_path_fatal);
+ new (GetGraph()->GetAllocator()) TypeCheckSlowPathARMVIXL(instruction,
+ is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
vixl32::Label done;
@@ -7957,8 +7960,8 @@
}
void LocationsBuilderARMVIXL::VisitMonitorOperation(HMonitorOperation* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
}
@@ -7989,7 +7992,7 @@
void LocationsBuilderARMVIXL::HandleBitwiseOperation(HBinaryOperation* instruction, Opcode opcode) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
DCHECK(instruction->GetResultType() == DataType::Type::kInt32
|| instruction->GetResultType() == DataType::Type::kInt64);
// Note: GVN reorders commutative operations to have the constant on the right hand side.
@@ -8012,7 +8015,7 @@
void LocationsBuilderARMVIXL::VisitBitwiseNegatedRight(HBitwiseNegatedRight* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
DCHECK(instruction->GetResultType() == DataType::Type::kInt32
|| instruction->GetResultType() == DataType::Type::kInt64);
@@ -8079,7 +8082,7 @@
DCHECK(instruction->GetType() == DataType::Type::kInt32 ||
instruction->GetType() == DataType::Type::kInt64);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
const bool overlap = instruction->GetType() == DataType::Type::kInt64 &&
HDataProcWithShifterOp::IsExtensionOp(instruction->GetOpKind());
@@ -8443,7 +8446,7 @@
// Slow path marking the GC root `root`. The entrypoint will
// be loaded by the slow path code.
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARMVIXL(instruction, root);
+ new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathARMVIXL(instruction, root);
codegen_->AddSlowPath(slow_path);
// /* GcRoot<mirror::Object> */ root = *(obj + offset)
@@ -8692,7 +8695,7 @@
// Slow path marking the object `ref` when the GC is marking. The
// entrypoint will be loaded by the slow path code.
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetArena()) LoadReferenceWithBakerReadBarrierSlowPathARMVIXL(
+ new (GetGraph()->GetAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARMVIXL(
instruction, ref, obj, offset, index, scale_factor, needs_null_check, temp_reg);
AddSlowPath(slow_path);
@@ -8738,8 +8741,8 @@
// Slow path updating the object reference at address `obj + field_offset`
// when the GC is marking. The entrypoint will be loaded by the slow path code.
- SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetArena()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARMVIXL(
+ SlowPathCodeARMVIXL* slow_path = new (GetGraph()->GetAllocator())
+ LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARMVIXL(
instruction,
ref,
obj,
@@ -8850,7 +8853,7 @@
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCodeARMVIXL* slow_path = new (GetGraph()->GetArena())
+ SlowPathCodeARMVIXL* slow_path = new (GetGraph()->GetAllocator())
ReadBarrierForHeapReferenceSlowPathARMVIXL(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -8886,7 +8889,7 @@
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathARMVIXL(instruction, out, root);
+ new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathARMVIXL(instruction, out, root);
AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
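
Aside: the hunks above allocate many out-of-line slow paths (null checks, type checks, read barriers), one per instruction site. A minimal sketch of the idiom, with assumed names rather than ART's actual SlowPathCode/CodeGenerator API: AddSlowPath() queues a stub while the main code is emitted, and the stubs are emitted afterwards, so the fast path pays only one conditional branch to the stub's entry label.

#include <cstdio>
#include <functional>
#include <string>
#include <utility>
#include <vector>

struct SlowPathSketch {
  std::string entry_label;
  std::function<void()> emit_body;  // stub body, ending with a jump back
};

class CodegenSketch {
 public:
  void AddSlowPath(SlowPathSketch path) { slow_paths_.push_back(std::move(path)); }
  void EmitSlowPaths() {
    // Out-of-line stubs are emitted after the hot code, in registration order.
    for (const SlowPathSketch& path : slow_paths_) {
      std::printf("%s:\n", path.entry_label.c_str());
      path.emit_body();
    }
  }
 private:
  std::vector<SlowPathSketch> slow_paths_;
};

int main() {
  CodegenSketch cg;
  std::printf("  cbz r0, null_check_entry\n");  // fast path: one branch
  std::printf("  ldr r1, [r0]\n");              // inline hot-path load
  cg.AddSlowPath({"null_check_entry", [] {
    std::printf("  bl artThrowNullPointerExceptionFromCode\n");
  }});
  cg.EmitSlowPaths();  // stubs land after the hot code
  return 0;
}

Cheap per-site stub objects are also why they all come from the graph's arena rather than the heap, which is what the rename in these hunks touches.
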
@@ -9191,7 +9194,7 @@
void LocationsBuilderARMVIXL::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instr, LocationSummary::kNoCall);
locations->SetInAt(HMultiplyAccumulate::kInputAccumulatorIndex,
Location::RequiresRegister());
locations->SetInAt(HMultiplyAccumulate::kInputMulLeftIndex, Location::RequiresRegister());
@@ -9228,7 +9231,7 @@
// Simple implementation of packed switch - generate cascaded compare/jumps.
void LocationsBuilderARMVIXL::VisitPackedSwitch(HPackedSwitch* switch_instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
if (switch_instr->GetNumEntries() > kPackedSwitchCompareJumpThreshold &&
codegen_->GetAssembler()->GetVIXLAssembler()->IsUsingT32()) {
@@ -9342,7 +9345,7 @@
TODO_VIXL32(FATAL);
} else {
// Let the parallel move resolver take care of all of this.
- HParallelMove parallel_move(GetGraph()->GetArena());
+ HParallelMove parallel_move(GetGraph()->GetAllocator());
parallel_move.AddMove(return_loc, trg, type, nullptr);
GetMoveResolver()->EmitNativeCode(&parallel_move);
}
@@ -9350,7 +9353,7 @@
void LocationsBuilderARMVIXL::VisitClassTableGet(HClassTableGet* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
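
Aside: every `new (GetGraph()->GetAllocator()) ...` above is placement new into the graph's arena; allocation is a pointer bump and nothing is ever individually deleted, the whole arena being released once the method is compiled. A self-contained sketch of that pattern (BumpArenaSketch and its operator new overload are illustrative assumptions, not ART's ArenaAllocator):

#include <cstddef>
#include <cstdint>
#include <cstdlib>

class BumpArenaSketch {
 public:
  explicit BumpArenaSketch(size_t capacity)
      : base_(static_cast<uint8_t*>(std::malloc(capacity))), capacity_(capacity) {}
  ~BumpArenaSketch() { std::free(base_); }  // everything is freed at once
  void* Alloc(size_t bytes) {
    bytes = (bytes + 15u) & ~size_t{15u};  // keep allocations 16-byte aligned
    if (used_ + bytes > capacity_) return nullptr;  // a real arena grows instead
    void* result = base_ + used_;
    used_ += bytes;
    return result;
  }
 private:
  uint8_t* base_;
  size_t capacity_;
  size_t used_ = 0;
};

// Placement form mirroring `new (GetGraph()->GetAllocator()) T(...)`.
// Declared noexcept so a null return skips construction instead of UB.
void* operator new(size_t bytes, BumpArenaSketch* arena) noexcept {
  return arena->Alloc(bytes);
}

struct LocationSummarySketch {  // stand-in for LocationSummary
  explicit LocationSummarySketch(int call_kind) : call_kind_(call_kind) {}
  int call_kind_;
};

int main() {
  BumpArenaSketch arena(1 << 16);
  LocationSummarySketch* locations =
      new (&arena) LocationSummarySketch(/* kNoCall */ 0);
  (void)locations;  // never deleted; it dies with the arena
  return 0;
}
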
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 58b8525..c46d17c 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -119,7 +119,7 @@
explicit JumpTableARMVIXL(HPackedSwitch* switch_instr)
: switch_instr_(switch_instr),
table_start_(),
- bb_addresses_(switch_instr->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ bb_addresses_(switch_instr->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
uint32_t num_entries = switch_instr_->GetNumEntries();
for (uint32_t i = 0; i < num_entries; i++) {
VIXLInt32Literal *lit = new VIXLInt32Literal(0, vixl32::RawLiteral::kManuallyPlaced);
@@ -739,7 +739,7 @@
void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
JumpTableARMVIXL* CreateJumpTable(HPackedSwitch* switch_instr) {
- jump_tables_.emplace_back(new (GetGraph()->GetArena()) JumpTableARMVIXL(switch_instr));
+ jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARMVIXL(switch_instr));
return jump_tables_.back().get();
}
void EmitJumpTables();
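
Aside: the `GetAllocator()->Adapter(kArenaAllocCodeGenerator)` call above hands a standard container an arena-backed allocator, so the jump table's vector storage lives in the same arena as the IR. A sketch of such an adapter under assumed names (ART's real adapter additionally tags each allocation with its ArenaAllocKind for accounting):

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <new>
#include <vector>

struct BumpArenaSketch {  // same toy arena as in the previous sketch
  explicit BumpArenaSketch(size_t cap)
      : base(static_cast<uint8_t*>(std::malloc(cap))), capacity(cap) {}
  ~BumpArenaSketch() { std::free(base); }
  void* Alloc(size_t n) {
    n = (n + 15u) & ~size_t{15u};
    if (used + n > capacity) throw std::bad_alloc();
    void* p = base + used;
    used += n;
    return p;
  }
  uint8_t* base;
  size_t capacity;
  size_t used = 0;
};

// STL-compatible allocator in the spirit of GetAllocator()->Adapter(...):
// storage comes from the arena and deallocate() is a no-op, because the
// arena is reclaimed wholesale once compilation finishes.
template <typename T>
struct ArenaAdapterSketch {
  using value_type = T;
  explicit ArenaAdapterSketch(BumpArenaSketch* a) : arena(a) {}
  template <typename U>
  ArenaAdapterSketch(const ArenaAdapterSketch<U>& other) : arena(other.arena) {}
  T* allocate(size_t n) { return static_cast<T*>(arena->Alloc(n * sizeof(T))); }
  void deallocate(T*, size_t) {}
  BumpArenaSketch* arena;
};

template <typename T, typename U>
bool operator==(const ArenaAdapterSketch<T>& a, const ArenaAdapterSketch<U>& b) {
  return a.arena == b.arena;
}
template <typename T, typename U>
bool operator!=(const ArenaAdapterSketch<T>& a, const ArenaAdapterSketch<U>& b) {
  return !(a == b);
}

int main() {
  BumpArenaSketch arena(1 << 16);
  // Mirrors bb_addresses_(switch_instr->GetAllocator()->Adapter(...)).
  std::vector<uint32_t, ArenaAdapterSketch<uint32_t>> addresses{
      ArenaAdapterSketch<uint32_t>(&arena)};
  addresses.push_back(0x1000);  // element storage now lives in the arena
  return 0;
}
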
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 3c592e7..7ea7b9c 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -557,7 +557,7 @@
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(
locations->InAt(0),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
@@ -968,7 +968,7 @@
// We're moving two or three locations to locations that could
// overlap, so we need a parallel move resolver.
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(ref_,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
DataType::Type::kReference,
@@ -1100,19 +1100,19 @@
block_labels_(nullptr),
location_builder_(graph, this),
instruction_visitor_(graph, this),
- move_resolver_(graph->GetArena(), this),
- assembler_(graph->GetArena(), &isa_features),
+ move_resolver_(graph->GetAllocator(), this),
+ assembler_(graph->GetAllocator(), &isa_features),
isa_features_(isa_features),
uint32_literals_(std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jit_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jit_class_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
clobbered_ra_(false) {
// Save RA (containing the return address) to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(RA));
@@ -1998,7 +1998,7 @@
void InstructionCodeGeneratorMIPS::GenerateSuspendCheck(HSuspendCheck* instruction,
HBasicBlock* successor) {
SuspendCheckSlowPathMIPS* slow_path =
- new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS(instruction, successor);
+ new (GetGraph()->GetAllocator()) SuspendCheckSlowPathMIPS(instruction, successor);
codegen_->AddSlowPath(slow_path);
__ LoadFromOffset(kLoadUnsignedHalfword,
@@ -2023,7 +2023,7 @@
void LocationsBuilderMIPS::HandleBinaryOp(HBinaryOperation* instruction) {
DCHECK_EQ(instruction->InputCount(), 2U);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
DataType::Type type = instruction->GetResultType();
switch (type) {
case DataType::Type::kInt32: {
@@ -2289,7 +2289,7 @@
void LocationsBuilderMIPS::HandleShift(HBinaryOperation* instr) {
DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr);
DataType::Type type = instr->GetResultType();
switch (type) {
case DataType::Type::kInt32:
@@ -2542,10 +2542,10 @@
bool object_array_get_with_read_barrier =
kEmitCompilerReadBarrier && (type == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_array_get_with_read_barrier
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_array_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -2824,7 +2824,7 @@
}
void LocationsBuilderMIPS::VisitArrayLength(HArrayLength* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -2868,7 +2868,7 @@
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
may_need_runtime_call_for_type_check ?
LocationSummary::kCallOnSlowPath :
@@ -2986,7 +2986,7 @@
SlowPathCodeMIPS* slow_path = nullptr;
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathMIPS(instruction);
+ slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathMIPS(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
MipsLabel non_zero;
@@ -3141,7 +3141,7 @@
void LocationsBuilderMIPS::VisitIntermediateArrayAddressIndex(
HIntermediateArrayAddressIndex* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
HIntConstant* shift = instruction->GetShift()->AsIntConstant();
@@ -3171,7 +3171,7 @@
void InstructionCodeGeneratorMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
BoundsCheckSlowPathMIPS* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS(instruction);
+ new (GetGraph()->GetAllocator()) BoundsCheckSlowPathMIPS(instruction);
codegen_->AddSlowPath(slow_path);
Register index = locations->InAt(0).AsRegister<Register>();
@@ -3222,7 +3222,8 @@
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
@@ -3262,8 +3263,8 @@
!instruction->CanThrowIntoCatchBlock();
}
SlowPathCodeMIPS* slow_path =
- new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction,
- is_type_check_slow_path_fatal);
+ new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS(instruction,
+ is_type_check_slow_path_fatal);
codegen_->AddSlowPath(slow_path);
// Avoid this check if we know `obj` is not null.
@@ -3417,7 +3418,7 @@
void LocationsBuilderMIPS::VisitClinitCheck(HClinitCheck* check) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
locations->SetInAt(0, Location::RequiresRegister());
if (check->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -3426,7 +3427,7 @@
void InstructionCodeGeneratorMIPS::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS(
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS(
check->GetLoadClass(),
check,
check->GetDexPc(),
@@ -3440,7 +3441,7 @@
DataType::Type in_type = compare->InputAt(0)->GetType();
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(compare, LocationSummary::kNoCall);
switch (in_type) {
case DataType::Type::kBool:
@@ -3601,7 +3602,7 @@
}
void LocationsBuilderMIPS::HandleCondition(HCondition* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
switch (instruction->InputAt(0)->GetType()) {
default:
case DataType::Type::kInt64:
@@ -3815,7 +3816,7 @@
? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(div, call_kind);
switch (type) {
case DataType::Type::kInt32:
@@ -3882,7 +3883,8 @@
}
void InstructionCodeGeneratorMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS(instruction);
+ SlowPathCodeMIPS* slow_path =
+ new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathMIPS(instruction);
codegen_->AddSlowPath(slow_path);
Location value = instruction->GetLocations()->InAt(0);
DataType::Type type = instruction->GetType();
@@ -3929,7 +3931,7 @@
void LocationsBuilderMIPS::VisitDoubleConstant(HDoubleConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3946,7 +3948,7 @@
void LocationsBuilderMIPS::VisitFloatConstant(HFloatConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -5526,7 +5528,7 @@
}
void LocationsBuilderMIPS::VisitIf(HIf* if_instr) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr);
if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -5543,7 +5545,7 @@
}
void LocationsBuilderMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
InvokeRuntimeCallingConvention calling_convention;
RegisterSet caller_saves = RegisterSet::Empty();
@@ -6098,7 +6100,7 @@
}
void LocationsBuilderMIPS::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(flag, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -6111,7 +6113,7 @@
}
void LocationsBuilderMIPS::VisitSelect(HSelect* select) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select);
CanMoveConditionally(select, codegen_->GetInstructionSetFeatures().IsR6(), locations);
}
@@ -6136,7 +6138,7 @@
}
void LocationsBuilderMIPS::VisitNativeDebugInfo(HNativeDebugInfo* info) {
- new (GetGraph()->GetArena()) LocationSummary(info);
+ new (GetGraph()->GetAllocator()) LocationSummary(info);
}
void InstructionCodeGeneratorMIPS::VisitNativeDebugInfo(HNativeDebugInfo*) {
@@ -6153,7 +6155,7 @@
bool generate_volatile = field_info.IsVolatile() && is_wide;
bool object_field_get_with_read_barrier =
kEmitCompilerReadBarrier && (field_type == DataType::Type::kReference);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
generate_volatile
? LocationSummary::kCallOnMainOnly
@@ -6327,7 +6329,7 @@
DataType::Type field_type = field_info.GetFieldType();
bool is_wide = (field_type == DataType::Type::kInt64) || (field_type == DataType::Type::kFloat64);
bool generate_volatile = field_info.IsVolatile() && is_wide;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction, generate_volatile ? LocationSummary::kCallOnMainOnly : LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
@@ -6691,7 +6693,7 @@
// Slow path marking the GC root `root`.
Location temp = Location::RegisterLocation(T9);
SlowPathCodeMIPS* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS(
+ new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS(
instruction,
root,
/*entrypoint*/ temp);
@@ -7018,14 +7020,14 @@
// to be null in this code path.
DCHECK_EQ(offset, 0u);
DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1);
- slow_path = new (GetGraph()->GetArena())
+ slow_path = new (GetGraph()->GetAllocator())
ReadBarrierMarkAndUpdateFieldSlowPathMIPS(instruction,
ref,
obj,
/* field_offset */ index,
temp_reg);
} else {
- slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS(instruction, ref);
+ slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS(instruction, ref);
}
AddSlowPath(slow_path);
@@ -7061,7 +7063,7 @@
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena())
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator())
ReadBarrierForHeapReferenceSlowPathMIPS(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -7097,7 +7099,7 @@
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCodeMIPS* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathMIPS(instruction, out, root);
+ new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathMIPS(instruction, out, root);
AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
@@ -7124,7 +7126,8 @@
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -7266,8 +7269,8 @@
maybe_temp_loc,
kWithoutReadBarrier);
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ Bne(out, cls, slow_path->GetEntryLabel());
__ LoadConst32(out, 1);
@@ -7295,8 +7298,8 @@
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
break;
@@ -7311,7 +7314,7 @@
}
void LocationsBuilderMIPS::VisitIntConstant(HIntConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -7320,7 +7323,7 @@
}
void LocationsBuilderMIPS::VisitNullConstant(HNullConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -7661,7 +7664,7 @@
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -7839,7 +7842,7 @@
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS(
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck(), bss_info_high);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
@@ -7859,7 +7862,7 @@
void LocationsBuilderMIPS::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -7869,7 +7872,7 @@
}
void LocationsBuilderMIPS::VisitClearException(HClearException* clear) {
- new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall);
}
void InstructionCodeGeneratorMIPS::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
@@ -7878,7 +7881,7 @@
void LocationsBuilderMIPS::VisitLoadString(HLoadString* load) {
LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
HLoadString::LoadKind load_kind = load->GetLoadKind();
const bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
const bool has_irreducible_loops = codegen_->GetGraph()->HasIrreducibleLoops();
@@ -8004,7 +8007,7 @@
kCompilerReadBarrierOption,
&info_low->label);
SlowPathCodeMIPS* slow_path =
- new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load, info_high);
+ new (GetGraph()->GetAllocator()) LoadStringSlowPathMIPS(load, info_high);
codegen_->AddSlowPath(slow_path);
__ Beqz(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -8041,7 +8044,7 @@
}
void LocationsBuilderMIPS::VisitLongConstant(HLongConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -8050,8 +8053,8 @@
}
void LocationsBuilderMIPS::VisitMonitorOperation(HMonitorOperation* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}
@@ -8068,7 +8071,7 @@
void LocationsBuilderMIPS::VisitMul(HMul* mul) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall);
switch (mul->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -8163,7 +8166,7 @@
void LocationsBuilderMIPS::VisitNeg(HNeg* neg) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
switch (neg->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -8221,8 +8224,8 @@
}
void LocationsBuilderMIPS::VisitNewArray(HNewArray* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference));
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -8240,8 +8243,8 @@
}
void LocationsBuilderMIPS::VisitNewInstance(HNewInstance* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
if (instruction->IsStringAlloc()) {
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
@@ -8270,7 +8273,7 @@
}
void LocationsBuilderMIPS::VisitNot(HNot* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -8303,7 +8306,7 @@
}
void LocationsBuilderMIPS::VisitBooleanNot(HBooleanNot* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -8331,7 +8334,7 @@
}
void CodeGeneratorMIPS::GenerateExplicitNullCheck(HNullCheck* instruction) {
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS(instruction);
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator()) NullCheckSlowPathMIPS(instruction);
AddSlowPath(slow_path);
Location obj = instruction->GetLocations()->InAt(0);
@@ -8360,7 +8363,7 @@
}
void LocationsBuilderMIPS::VisitParameterValue(HParameterValue* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
if (location.IsStackSlot()) {
location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
@@ -8377,7 +8380,7 @@
void LocationsBuilderMIPS::VisitCurrentMethod(HCurrentMethod* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}
@@ -8387,7 +8390,7 @@
}
void LocationsBuilderMIPS::VisitPhi(HPhi* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
locations->SetInAt(i, Location::Any());
}
@@ -8403,7 +8406,7 @@
LocationSummary::CallKind call_kind = (type == DataType::Type::kInt32)
? LocationSummary::kNoCall
: LocationSummary::kCallOnMainOnly;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(rem, call_kind);
switch (type) {
case DataType::Type::kInt32:
@@ -8481,7 +8484,7 @@
}
void LocationsBuilderMIPS::VisitReturn(HReturn* ret) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(ret);
DataType::Type return_type = ret->InputAt(0)->GetType();
locations->SetInAt(0, MipsReturnLocation(return_type));
}
@@ -8622,8 +8625,8 @@
}
void LocationsBuilderMIPS::VisitSuspendCheck(HSuspendCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
// In suspend check slow path, usually there are no caller-save registers at all.
// If SIMD instructions are present, however, we force spilling all live SIMD
// registers in full width (since the runtime only saves/restores lower part).
@@ -8646,8 +8649,8 @@
}
void LocationsBuilderMIPS::VisitThrow(HThrow* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}
@@ -8676,7 +8679,8 @@
call_kind = LocationSummary::kCallOnMainOnly;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(conversion, call_kind);
if (call_kind == LocationSummary::kNoCall) {
if (DataType::IsFloatingPointType(input_type)) {
@@ -9014,7 +9018,7 @@
void LocationsBuilderMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -9123,7 +9127,7 @@
void LocationsBuilderMIPS::VisitMipsPackedSwitch(HMipsPackedSwitch* switch_instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
// Constant area pointer (HMipsComputeBaseMethodAddress).
locations->SetInAt(1, Location::RequiresRegister());
@@ -9152,7 +9156,7 @@
void LocationsBuilderMIPS::VisitMipsComputeBaseMethodAddress(
HMipsComputeBaseMethodAddress* insn) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(insn, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(insn, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -9185,7 +9189,7 @@
void LocationsBuilderMIPS::VisitClassTableGet(HClassTableGet* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
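
Aside: several hunks above construct an HParallelMove with the comment that two or three locations "could overlap, so we need a parallel move resolver". A toy resolver showing why (assumed names; ART's MoveResolver also handles stack slots and FP registers):

#include <cstdio>
#include <vector>

struct MoveSketch { int dst; int src; };  // register-to-register move

// The moves in a parallel move are semantically simultaneous; emitting them
// naively can clobber a source before it is read. Emit safe moves first and
// break pure cycles with a scratch register.
void ResolveParallelMoves(std::vector<MoveSketch> pending, int scratch) {
  while (!pending.empty()) {
    bool progressed = false;
    for (size_t i = 0; i < pending.size(); ++i) {
      bool blocked = false;
      for (size_t j = 0; j < pending.size(); ++j) {
        // A move is blocked while another pending move still reads its dst.
        blocked |= (j != i && pending[j].src == pending[i].dst);
      }
      if (!blocked) {
        std::printf("  mov r%d, r%d\n", pending[i].dst, pending[i].src);
        pending.erase(pending.begin() + i);
        progressed = true;
        break;
      }
    }
    if (!progressed) {
      // Every remaining move is part of a cycle (e.g. r0 <-> r1): route one
      // source through the scratch register to break it.
      std::printf("  mov r%d, r%d\n", scratch, pending.front().src);
      pending.front().src = scratch;
    }
  }
}

int main() {
  // Swap r0 and r1 "in parallel"; sequential mov r0,r1 / mov r1,r0 would
  // lose the original value of r0.
  ResolveParallelMoves({{0, 1}, {1, 0}}, /* scratch */ 12);
  return 0;
}

On the swap above this emits mov r12,r1 / mov r1,r0 / mov r0,r12, exactly the cycle-breaking a naive sequential emission would get wrong.
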
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 6877003..fad0fe7 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -512,7 +512,7 @@
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(
locations->InAt(0),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
@@ -910,7 +910,7 @@
// We're moving two or three locations to locations that could
// overlap, so we need a parallel move resolver.
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(ref_,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
DataType::Type::kReference,
@@ -1041,23 +1041,23 @@
block_labels_(nullptr),
location_builder_(graph, this),
instruction_visitor_(graph, this),
- move_resolver_(graph->GetArena(), this),
- assembler_(graph->GetArena(), &isa_features),
+ move_resolver_(graph->GetAllocator(), this),
+ assembler_(graph->GetAllocator(), &isa_features),
isa_features_(isa_features),
uint32_literals_(std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
uint64_literals_(std::less<uint64_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(TypeReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
// Save RA (containing the return address) to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(RA));
}
@@ -1835,7 +1835,7 @@
void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruction,
HBasicBlock* successor) {
SuspendCheckSlowPathMIPS64* slow_path =
- new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS64(instruction, successor);
+ new (GetGraph()->GetAllocator()) SuspendCheckSlowPathMIPS64(instruction, successor);
codegen_->AddSlowPath(slow_path);
__ LoadFromOffset(kLoadUnsignedHalfword,
@@ -1860,7 +1860,7 @@
void LocationsBuilderMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
DCHECK_EQ(instruction->InputCount(), 2U);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
DataType::Type type = instruction->GetResultType();
switch (type) {
case DataType::Type::kInt32:
@@ -1990,7 +1990,7 @@
void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) {
DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr);
DataType::Type type = instr->GetResultType();
switch (type) {
case DataType::Type::kInt32:
@@ -2119,10 +2119,10 @@
bool object_array_get_with_read_barrier =
kEmitCompilerReadBarrier && (type == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_array_get_with_read_barrier
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_array_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -2385,7 +2385,7 @@
}
void LocationsBuilderMIPS64::VisitArrayLength(HArrayLength* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -2429,7 +2429,7 @@
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
may_need_runtime_call_for_type_check ?
LocationSummary::kCallOnSlowPath :
@@ -2543,7 +2543,7 @@
SlowPathCodeMIPS64* slow_path = nullptr;
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathMIPS64(instruction);
+ slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathMIPS64(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
Mips64Label non_zero;
@@ -2700,7 +2700,7 @@
void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
BoundsCheckSlowPathMIPS64* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS64(instruction);
+ new (GetGraph()->GetAllocator()) BoundsCheckSlowPathMIPS64(instruction);
codegen_->AddSlowPath(slow_path);
GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
@@ -2751,7 +2751,8 @@
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
@@ -2791,8 +2792,8 @@
!instruction->CanThrowIntoCatchBlock();
}
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction,
- is_type_check_slow_path_fatal);
+ new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS64(instruction,
+ is_type_check_slow_path_fatal);
codegen_->AddSlowPath(slow_path);
// Avoid this check if we know `obj` is not null.
@@ -2946,7 +2947,7 @@
void LocationsBuilderMIPS64::VisitClinitCheck(HClinitCheck* check) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
locations->SetInAt(0, Location::RequiresRegister());
if (check->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -2955,7 +2956,7 @@
void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
- SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
+ SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS64(
check->GetLoadClass(),
check,
check->GetDexPc(),
@@ -2968,7 +2969,7 @@
void LocationsBuilderMIPS64::VisitCompare(HCompare* compare) {
DataType::Type in_type = compare->InputAt(0)->GetType();
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(compare);
switch (in_type) {
case DataType::Type::kBool:
@@ -3088,7 +3089,7 @@
}
void LocationsBuilderMIPS64::HandleCondition(HCondition* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
switch (instruction->InputAt(0)->GetType()) {
default:
case DataType::Type::kInt64:
@@ -3376,7 +3377,7 @@
void LocationsBuilderMIPS64::VisitDiv(HDiv* div) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(div, LocationSummary::kNoCall);
switch (div->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -3429,7 +3430,7 @@
void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS64(instruction);
+ new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathMIPS64(instruction);
codegen_->AddSlowPath(slow_path);
Location value = instruction->GetLocations()->InAt(0);
@@ -3455,7 +3456,7 @@
void LocationsBuilderMIPS64::VisitDoubleConstant(HDoubleConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3472,7 +3473,7 @@
void LocationsBuilderMIPS64::VisitFloatConstant(HFloatConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -4255,7 +4256,7 @@
}
void LocationsBuilderMIPS64::VisitIf(HIf* if_instr) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr);
if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -4272,7 +4273,7 @@
}
void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
InvokeRuntimeCallingConvention calling_convention;
RegisterSet caller_saves = RegisterSet::Empty();
@@ -4594,7 +4595,7 @@
}
void LocationsBuilderMIPS64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(flag, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -4607,7 +4608,7 @@
}
void LocationsBuilderMIPS64::VisitSelect(HSelect* select) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select);
CanMoveConditionally(select, locations);
}
@@ -4627,7 +4628,7 @@
}
void LocationsBuilderMIPS64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
- new (GetGraph()->GetArena()) LocationSummary(info);
+ new (GetGraph()->GetAllocator()) LocationSummary(info);
}
void InstructionCodeGeneratorMIPS64::VisitNativeDebugInfo(HNativeDebugInfo*) {
@@ -4643,7 +4644,7 @@
DataType::Type field_type = field_info.GetFieldType();
bool object_field_get_with_read_barrier =
kEmitCompilerReadBarrier && (field_type == DataType::Type::kReference);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
object_field_get_with_read_barrier
? LocationSummary::kCallOnSlowPath
@@ -4761,7 +4762,7 @@
void LocationsBuilderMIPS64::HandleFieldSet(HInstruction* instruction,
const FieldInfo& field_info ATTRIBUTE_UNUSED) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
if (DataType::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
locations->SetInAt(1, FpuRegisterOrConstantForStore(instruction->InputAt(1)));
@@ -5050,7 +5051,7 @@
// Slow path marking the GC root `root`.
Location temp = Location::RegisterLocation(T9);
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS64(
+ new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS64(
instruction,
root,
/*entrypoint*/ temp);
@@ -5335,14 +5336,14 @@
// above are expected to be null in this code path.
DCHECK_EQ(offset, 0u);
DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1);
- slow_path = new (GetGraph()->GetArena())
+ slow_path = new (GetGraph()->GetAllocator())
ReadBarrierMarkAndUpdateFieldSlowPathMIPS64(instruction,
ref,
obj,
/* field_offset */ index,
temp_reg);
} else {
- slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS64(instruction, ref);
+ slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS64(instruction, ref);
}
AddSlowPath(slow_path);
@@ -5378,7 +5379,7 @@
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena())
+ SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetAllocator())
ReadBarrierForHeapReferenceSlowPathMIPS64(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -5414,7 +5415,7 @@
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathMIPS64(instruction, out, root);
+ new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathMIPS64(instruction, out, root);
AddSlowPath(slow_path);
__ Bc(slow_path->GetEntryLabel());
@@ -5441,7 +5442,8 @@
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -5583,8 +5585,8 @@
maybe_temp_loc,
kWithoutReadBarrier);
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS64(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ Bnec(out, cls, slow_path->GetEntryLabel());
__ LoadConst32(out, 1);
@@ -5612,8 +5614,8 @@
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS64(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ Bc(slow_path->GetEntryLabel());
break;
@@ -5628,7 +5630,7 @@
}
void LocationsBuilderMIPS64::VisitIntConstant(HIntConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -5637,7 +5639,7 @@
}
void LocationsBuilderMIPS64::VisitNullConstant(HNullConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -5952,7 +5954,7 @@
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -6081,7 +6083,7 @@
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
+ SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS64(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck(), bss_info_high);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
@@ -6101,7 +6103,7 @@
void LocationsBuilderMIPS64::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -6111,7 +6113,7 @@
}
void LocationsBuilderMIPS64::VisitClearException(HClearException* clear) {
- new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall);
}
void InstructionCodeGeneratorMIPS64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
@@ -6121,7 +6123,7 @@
void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) {
HLoadString::LoadKind load_kind = load->GetLoadKind();
LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
if (load_kind == HLoadString::LoadKind::kRuntimeCall) {
InvokeRuntimeCallingConvention calling_convention;
locations->SetOut(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -6199,7 +6201,7 @@
kCompilerReadBarrierOption,
&info_low->label);
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load, info_high);
+ new (GetGraph()->GetAllocator()) LoadStringSlowPathMIPS64(load, info_high);
codegen_->AddSlowPath(slow_path);
__ Beqzc(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -6227,7 +6229,7 @@
}
void LocationsBuilderMIPS64::VisitLongConstant(HLongConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -6236,8 +6238,8 @@
}
void LocationsBuilderMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}
@@ -6255,7 +6257,7 @@
void LocationsBuilderMIPS64::VisitMul(HMul* mul) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall);
switch (mul->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -6310,7 +6312,7 @@
void LocationsBuilderMIPS64::VisitNeg(HNeg* neg) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
switch (neg->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -6360,8 +6362,8 @@
}
void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference));
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -6379,8 +6381,8 @@
}
void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
if (instruction->IsStringAlloc()) {
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
@@ -6410,7 +6412,7 @@
}
void LocationsBuilderMIPS64::VisitNot(HNot* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -6434,7 +6436,7 @@
}
void LocationsBuilderMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -6462,7 +6464,8 @@
}
void CodeGeneratorMIPS64::GenerateExplicitNullCheck(HNullCheck* instruction) {
- SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS64(instruction);
+ SlowPathCodeMIPS64* slow_path =
+ new (GetGraph()->GetAllocator()) NullCheckSlowPathMIPS64(instruction);
AddSlowPath(slow_path);
Location obj = instruction->GetLocations()->InAt(0);
@@ -6491,7 +6494,7 @@
}
void LocationsBuilderMIPS64::VisitParameterValue(HParameterValue* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
if (location.IsStackSlot()) {
location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
@@ -6508,7 +6511,7 @@
void LocationsBuilderMIPS64::VisitCurrentMethod(HCurrentMethod* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}
@@ -6518,7 +6521,7 @@
}
void LocationsBuilderMIPS64::VisitPhi(HPhi* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
locations->SetInAt(i, Location::Any());
}
@@ -6534,7 +6537,7 @@
LocationSummary::CallKind call_kind =
DataType::IsFloatingPointType(type) ? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(rem, call_kind);
switch (type) {
case DataType::Type::kInt32:
@@ -6602,7 +6605,7 @@
}
void LocationsBuilderMIPS64::VisitReturn(HReturn* ret) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(ret);
DataType::Type return_type = ret->InputAt(0)->GetType();
locations->SetInAt(0, Mips64ReturnLocation(return_type));
}
@@ -6736,8 +6739,8 @@
}
void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
// In suspend check slow path, usually there are no caller-save registers at all.
// If SIMD instructions are present, however, we force spilling all live SIMD
// registers in full width (since the runtime only saves/restores lower part).
@@ -6760,8 +6763,8 @@
}
void LocationsBuilderMIPS64::VisitThrow(HThrow* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}
@@ -6782,7 +6785,7 @@
LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(conversion);
if (DataType::IsFloatingPointType(input_type)) {
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -7014,7 +7017,7 @@
// Simple implementation of packed switch - generate cascaded compare/jumps.
void LocationsBuilderMIPS64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -7110,7 +7113,7 @@
void LocationsBuilderMIPS64::VisitClassTableGet(HClassTableGet* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
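Note: the mechanical change throughout this file (and the vector code generators that follow) is GetArena() -> GetAllocator(); the allocation pattern itself is unchanged. What every hunk relies on is placement new into an arena-backed allocator: objects are bump-allocated and never individually deleted. Below is a minimal, hedged sketch of that pattern with simplified stand-in types — Arena and LocationSummaryLike are illustrative only, not ART's real ArenaAllocator/LocationSummary, which are pool-backed and tag allocations by kind.

#include <cstddef>
#include <cstdint>
#include <vector>

// Simplified bump-pointer arena. Inner buffers never move even when the
// outer vector reallocates, so returned pointers stay valid.
class Arena {
 public:
  void* Alloc(size_t bytes) {
    // Round up so consecutive objects stay aligned. Oversized requests
    // (bytes > kBlockSize) are not handled in this sketch.
    bytes = (bytes + alignof(std::max_align_t) - 1) &
            ~(alignof(std::max_align_t) - 1);
    if (pos_ + bytes > kBlockSize) {
      blocks_.emplace_back(kBlockSize);
      pos_ = 0;
    }
    void* result = blocks_.back().data() + pos_;
    pos_ += bytes;
    return result;
  }

 private:
  static constexpr size_t kBlockSize = 4096;
  std::vector<std::vector<uint8_t>> blocks_;
  size_t pos_ = kBlockSize;  // Forces a fresh block on the first Alloc().
};

// Enables the `new (allocator) T(...)` spelling seen in every hunk above.
void* operator new(size_t bytes, Arena* arena) { return arena->Alloc(bytes); }

struct LocationSummaryLike {  // Hypothetical stand-in for LocationSummary.
  explicit LocationSummaryLike(int kind) : kind_(kind) {}
  int kind_;
};

int main() {
  Arena arena;
  // No matching delete: the object is trivially destructible and the arena
  // releases all blocks at once when compilation of the method finishes.
  LocationSummaryLike* locations = new (&arena) LocationSummaryLike(/*kind=*/0);
  return locations->kind_;
}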
diff --git a/compiler/optimizing/code_generator_vector_arm64.cc b/compiler/optimizing/code_generator_vector_arm64.cc
index b2aec1e..10aced0 100644
--- a/compiler/optimizing/code_generator_vector_arm64.cc
+++ b/compiler/optimizing/code_generator_vector_arm64.cc
@@ -38,7 +38,7 @@
#define __ GetVIXLAssembler()->
void LocationsBuilderARM64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
HInstruction* input = instruction->InputAt(0);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
@@ -131,7 +131,7 @@
}
void LocationsBuilderARM64::VisitVecExtractScalar(HVecExtractScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -179,8 +179,8 @@
}
// Helper to set up locations for vector unary operations.
-static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -206,7 +206,7 @@
}
void LocationsBuilderARM64::VisitVecReduce(HVecReduce* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecReduce(HVecReduce* instruction) {
@@ -246,7 +246,7 @@
}
void LocationsBuilderARM64::VisitVecCnv(HVecCnv* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecCnv(HVecCnv* instruction) {
@@ -264,7 +264,7 @@
}
void LocationsBuilderARM64::VisitVecNeg(HVecNeg* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecNeg(HVecNeg* instruction) {
@@ -305,7 +305,7 @@
}
void LocationsBuilderARM64::VisitVecAbs(HVecAbs* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecAbs(HVecAbs* instruction) {
@@ -344,7 +344,7 @@
}
void LocationsBuilderARM64::VisitVecNot(HVecNot* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecNot(HVecNot* instruction) {
@@ -372,8 +372,8 @@
}
// Helper to set up locations for vector binary operations.
-static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -395,7 +395,7 @@
}
void LocationsBuilderARM64::VisitVecAdd(HVecAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecAdd(HVecAdd* instruction) {
@@ -437,7 +437,7 @@
}
void LocationsBuilderARM64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
@@ -477,7 +477,7 @@
}
void LocationsBuilderARM64::VisitVecSub(HVecSub* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecSub(HVecSub* instruction) {
@@ -519,7 +519,7 @@
}
void LocationsBuilderARM64::VisitVecMul(HVecMul* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecMul(HVecMul* instruction) {
@@ -557,7 +557,7 @@
}
void LocationsBuilderARM64::VisitVecDiv(HVecDiv* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecDiv(HVecDiv* instruction) {
@@ -581,7 +581,7 @@
}
void LocationsBuilderARM64::VisitVecMin(HVecMin* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecMin(HVecMin* instruction) {
@@ -631,7 +631,7 @@
}
void LocationsBuilderARM64::VisitVecMax(HVecMax* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecMax(HVecMax* instruction) {
@@ -682,7 +682,7 @@
void LocationsBuilderARM64::VisitVecAnd(HVecAnd* instruction) {
// TODO: Allow constants supported by BIC (vector, immediate).
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecAnd(HVecAnd* instruction) {
@@ -718,7 +718,7 @@
}
void LocationsBuilderARM64::VisitVecOr(HVecOr* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecOr(HVecOr* instruction) {
@@ -745,7 +745,7 @@
}
void LocationsBuilderARM64::VisitVecXor(HVecXor* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecXor(HVecXor* instruction) {
@@ -772,8 +772,8 @@
}
// Helper to set up locations for vector shift operations.
-static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint8:
case DataType::Type::kInt8:
@@ -792,7 +792,7 @@
}
void LocationsBuilderARM64::VisitVecShl(HVecShl* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecShl(HVecShl* instruction) {
@@ -826,7 +826,7 @@
}
void LocationsBuilderARM64::VisitVecShr(HVecShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecShr(HVecShr* instruction) {
@@ -860,7 +860,7 @@
}
void LocationsBuilderARM64::VisitVecUShr(HVecUShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecUShr(HVecUShr* instruction) {
@@ -894,7 +894,7 @@
}
void LocationsBuilderARM64::VisitVecSetScalars(HVecSetScalars* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented
@@ -967,8 +967,8 @@
}
// Helper to set up locations for vector accumulations.
-static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint8:
case DataType::Type::kInt8:
@@ -988,7 +988,7 @@
}
void LocationsBuilderARM64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
// Some early revisions of the Cortex-A53 have an erratum (835769) whereby it is possible for a
@@ -1036,7 +1036,7 @@
}
void LocationsBuilderARM64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
// Some conversions require temporary registers.
LocationSummary* locations = instruction->GetLocations();
HVecOperation* a = instruction->InputAt(1)->AsVecOperation();
@@ -1216,10 +1216,10 @@
}
// Helper to set up locations for vector memory operations.
-static void CreateVecMemLocations(ArenaAllocator* arena,
+static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
bool is_load) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -1281,7 +1281,7 @@
}
void LocationsBuilderARM64::VisitVecLoad(HVecLoad* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ true);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ true);
}
void InstructionCodeGeneratorARM64::VisitVecLoad(HVecLoad* instruction) {
@@ -1339,7 +1339,7 @@
}
void LocationsBuilderARM64::VisitVecStore(HVecStore* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ false);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ false);
}
void InstructionCodeGeneratorARM64::VisitVecStore(HVecStore* instruction) {
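Note: the CreateVec*Locations helpers whose parameter is renamed above all share one shape — allocate a LocationSummary from the (now consistently named) allocator, then switch on the packed element type to pick register constraints, so each Visit* method collapses to a one-line call. A condensed sketch of that dispatch follows; PackedType, Location, and LocationSummary here are simplified stand-ins, not ART's actual classes, and plain heap allocation replaces the arena placement new to keep the sketch short.

#include <cassert>

enum class PackedType { kBool, kUint8, kInt8, kUint16, kInt16,
                        kInt32, kInt64, kFloat32, kFloat64 };

struct Location {
  enum Kind { kNone, kFpuRegister };
  Kind kind = kNone;
  static Location RequiresFpuRegister() { return Location{kFpuRegister}; }
};

struct LocationSummary {
  Location in[1];
  Location out;
  void SetInAt(int i, Location loc) { in[i] = loc; }
  void SetOut(Location loc) { out = loc; }
};

// Shape of CreateVecUnOpLocations: one switch decides the constraints for a
// whole family of vector ops (neg, abs, not, cnv, reduce, ...).
LocationSummary* CreateVecUnOpLocationsSketch(PackedType type) {
  LocationSummary* locations = new LocationSummary();
  switch (type) {
    case PackedType::kBool:
    case PackedType::kUint8:
    case PackedType::kInt8:
    case PackedType::kUint16:
    case PackedType::kInt16:
    case PackedType::kInt32:
    case PackedType::kInt64:
    case PackedType::kFloat32:
    case PackedType::kFloat64:
      // Packed values live in FPU/SIMD registers on all targets in this diff.
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister());
      break;
  }
  return locations;
}

int main() {
  LocationSummary* l = CreateVecUnOpLocationsSketch(PackedType::kInt32);
  assert(l->out.kind == Location::kFpuRegister);
  delete l;
  return 0;
}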
diff --git a/compiler/optimizing/code_generator_vector_arm_vixl.cc b/compiler/optimizing/code_generator_vector_arm_vixl.cc
index df75752..f84408d 100644
--- a/compiler/optimizing/code_generator_vector_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_vector_arm_vixl.cc
@@ -33,7 +33,7 @@
#define __ GetVIXLAssembler()->
void LocationsBuilderARMVIXL::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -84,8 +84,8 @@
}
// Helper to set up locations for vector unary operations.
-static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -108,7 +108,7 @@
}
void LocationsBuilderARMVIXL::VisitVecReduce(HVecReduce* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecReduce(HVecReduce* instruction) {
@@ -116,7 +116,7 @@
}
void LocationsBuilderARMVIXL::VisitVecCnv(HVecCnv* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecCnv(HVecCnv* instruction) {
@@ -124,7 +124,7 @@
}
void LocationsBuilderARMVIXL::VisitVecNeg(HVecNeg* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecNeg(HVecNeg* instruction) {
@@ -153,7 +153,7 @@
}
void LocationsBuilderARMVIXL::VisitVecAbs(HVecAbs* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecAbs(HVecAbs* instruction) {
@@ -180,7 +180,7 @@
}
void LocationsBuilderARMVIXL::VisitVecNot(HVecNot* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecNot(HVecNot* instruction) {
@@ -207,8 +207,8 @@
}
// Helper to set up locations for vector binary operations.
-static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -227,7 +227,7 @@
}
void LocationsBuilderARMVIXL::VisitVecAdd(HVecAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecAdd(HVecAdd* instruction) {
@@ -257,7 +257,7 @@
}
void LocationsBuilderARMVIXL::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
@@ -297,7 +297,7 @@
}
void LocationsBuilderARMVIXL::VisitVecSub(HVecSub* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecSub(HVecSub* instruction) {
@@ -327,7 +327,7 @@
}
void LocationsBuilderARMVIXL::VisitVecMul(HVecMul* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecMul(HVecMul* instruction) {
@@ -357,7 +357,7 @@
}
void LocationsBuilderARMVIXL::VisitVecDiv(HVecDiv* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecDiv(HVecDiv* instruction) {
@@ -365,7 +365,7 @@
}
void LocationsBuilderARMVIXL::VisitVecMin(HVecMin* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecMin(HVecMin* instruction) {
@@ -405,7 +405,7 @@
}
void LocationsBuilderARMVIXL::VisitVecMax(HVecMax* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecMax(HVecMax* instruction) {
@@ -446,7 +446,7 @@
void LocationsBuilderARMVIXL::VisitVecAnd(HVecAnd* instruction) {
// TODO: Allow constants supported by VAND (immediate).
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecAnd(HVecAnd* instruction) {
@@ -470,7 +470,7 @@
}
void LocationsBuilderARMVIXL::VisitVecAndNot(HVecAndNot* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecAndNot(HVecAndNot* instruction) {
@@ -478,7 +478,7 @@
}
void LocationsBuilderARMVIXL::VisitVecOr(HVecOr* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecOr(HVecOr* instruction) {
@@ -502,7 +502,7 @@
}
void LocationsBuilderARMVIXL::VisitVecXor(HVecXor* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecXor(HVecXor* instruction) {
@@ -526,8 +526,8 @@
}
// Helper to set up locations for vector shift operations.
-static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint8:
case DataType::Type::kInt8:
@@ -545,7 +545,7 @@
}
void LocationsBuilderARMVIXL::VisitVecShl(HVecShl* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecShl(HVecShl* instruction) {
@@ -575,7 +575,7 @@
}
void LocationsBuilderARMVIXL::VisitVecShr(HVecShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecShr(HVecShr* instruction) {
@@ -605,7 +605,7 @@
}
void LocationsBuilderARMVIXL::VisitVecUShr(HVecUShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecUShr(HVecUShr* instruction) {
@@ -643,8 +643,8 @@
}
// Helper to set up locations for vector accumulations.
-static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint8:
case DataType::Type::kInt8:
@@ -664,7 +664,7 @@
}
void LocationsBuilderARMVIXL::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
@@ -672,7 +672,7 @@
}
void LocationsBuilderARMVIXL::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
@@ -686,10 +686,10 @@
}
// Helper to set up locations for vector memory operations.
-static void CreateVecMemLocations(ArenaAllocator* arena,
+static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
bool is_load) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -767,7 +767,7 @@
}
void LocationsBuilderARMVIXL::VisitVecLoad(HVecLoad* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ true);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ true);
}
void InstructionCodeGeneratorARMVIXL::VisitVecLoad(HVecLoad* instruction) {
@@ -818,7 +818,7 @@
}
void LocationsBuilderARMVIXL::VisitVecStore(HVecStore* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ false);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ false);
}
void InstructionCodeGeneratorARMVIXL::VisitVecStore(HVecStore* instruction) {
diff --git a/compiler/optimizing/code_generator_vector_mips.cc b/compiler/optimizing/code_generator_vector_mips.cc
index e8c5157..c5a39ff 100644
--- a/compiler/optimizing/code_generator_vector_mips.cc
+++ b/compiler/optimizing/code_generator_vector_mips.cc
@@ -24,7 +24,7 @@
#define __ down_cast<MipsAssembler*>(GetAssembler())-> // NOLINT
void LocationsBuilderMIPS::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -99,8 +99,8 @@
}
// Helper to set up locations for vector unary operations.
-static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -129,7 +129,7 @@
}
void LocationsBuilderMIPS::VisitVecReduce(HVecReduce* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecReduce(HVecReduce* instruction) {
@@ -137,7 +137,7 @@
}
void LocationsBuilderMIPS::VisitVecCnv(HVecCnv* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecCnv(HVecCnv* instruction) {
@@ -155,7 +155,7 @@
}
void LocationsBuilderMIPS::VisitVecNeg(HVecNeg* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecNeg(HVecNeg* instruction) {
@@ -202,7 +202,7 @@
}
void LocationsBuilderMIPS::VisitVecAbs(HVecAbs* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecAbs(HVecAbs* instruction) {
@@ -249,7 +249,7 @@
}
void LocationsBuilderMIPS::VisitVecNot(HVecNot* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecNot(HVecNot* instruction) {
@@ -281,8 +281,8 @@
}
// Helper to set up locations for vector binary operations.
-static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -304,7 +304,7 @@
}
void LocationsBuilderMIPS::VisitVecAdd(HVecAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecAdd(HVecAdd* instruction) {
@@ -346,7 +346,7 @@
}
void LocationsBuilderMIPS::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
@@ -386,7 +386,7 @@
}
void LocationsBuilderMIPS::VisitVecSub(HVecSub* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecSub(HVecSub* instruction) {
@@ -428,7 +428,7 @@
}
void LocationsBuilderMIPS::VisitVecMul(HVecMul* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecMul(HVecMul* instruction) {
@@ -470,7 +470,7 @@
}
void LocationsBuilderMIPS::VisitVecDiv(HVecDiv* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecDiv(HVecDiv* instruction) {
@@ -494,7 +494,7 @@
}
void LocationsBuilderMIPS::VisitVecMin(HVecMin* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecMin(HVecMin* instruction) {
@@ -554,7 +554,7 @@
}
void LocationsBuilderMIPS::VisitVecMax(HVecMax* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecMax(HVecMax* instruction) {
@@ -614,7 +614,7 @@
}
void LocationsBuilderMIPS::VisitVecAnd(HVecAnd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecAnd(HVecAnd* instruction) {
@@ -643,7 +643,7 @@
}
void LocationsBuilderMIPS::VisitVecAndNot(HVecAndNot* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecAndNot(HVecAndNot* instruction) {
@@ -651,7 +651,7 @@
}
void LocationsBuilderMIPS::VisitVecOr(HVecOr* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecOr(HVecOr* instruction) {
@@ -680,7 +680,7 @@
}
void LocationsBuilderMIPS::VisitVecXor(HVecXor* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecXor(HVecXor* instruction) {
@@ -709,8 +709,8 @@
}
// Helper to set up locations for vector shift operations.
-static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint8:
case DataType::Type::kInt8:
@@ -729,7 +729,7 @@
}
void LocationsBuilderMIPS::VisitVecShl(HVecShl* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecShl(HVecShl* instruction) {
@@ -763,7 +763,7 @@
}
void LocationsBuilderMIPS::VisitVecShr(HVecShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecShr(HVecShr* instruction) {
@@ -797,7 +797,7 @@
}
void LocationsBuilderMIPS::VisitVecUShr(HVecUShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecUShr(HVecUShr* instruction) {
@@ -839,8 +839,8 @@
}
// Helper to set up locations for vector accumulations.
-static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint8:
case DataType::Type::kInt8:
@@ -860,7 +860,7 @@
}
void LocationsBuilderMIPS::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
@@ -910,7 +910,7 @@
}
void LocationsBuilderMIPS::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
@@ -919,10 +919,10 @@
}
// Helper to set up locations for vector memory operations.
-static void CreateVecMemLocations(ArenaAllocator* arena,
+static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
bool is_load) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -980,7 +980,7 @@
}
void LocationsBuilderMIPS::VisitVecLoad(HVecLoad* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /* is_load */ true);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ true);
}
void InstructionCodeGeneratorMIPS::VisitVecLoad(HVecLoad* instruction) {
@@ -1023,7 +1023,7 @@
}
void LocationsBuilderMIPS::VisitVecStore(HVecStore* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /* is_load */ false);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ false);
}
void InstructionCodeGeneratorMIPS::VisitVecStore(HVecStore* instruction) {
diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc
index 7d69773..e606df2 100644
--- a/compiler/optimizing/code_generator_vector_mips64.cc
+++ b/compiler/optimizing/code_generator_vector_mips64.cc
@@ -29,7 +29,7 @@
}
void LocationsBuilderMIPS64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -102,8 +102,8 @@
}
// Helper to set up locations for vector unary operations.
-static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -132,7 +132,7 @@
}
void LocationsBuilderMIPS64::VisitVecReduce(HVecReduce* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecReduce(HVecReduce* instruction) {
@@ -140,7 +140,7 @@
}
void LocationsBuilderMIPS64::VisitVecCnv(HVecCnv* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecCnv(HVecCnv* instruction) {
@@ -159,7 +159,7 @@
}
void LocationsBuilderMIPS64::VisitVecNeg(HVecNeg* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecNeg(HVecNeg* instruction) {
@@ -206,7 +206,7 @@
}
void LocationsBuilderMIPS64::VisitVecAbs(HVecAbs* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecAbs(HVecAbs* instruction) {
@@ -253,7 +253,7 @@
}
void LocationsBuilderMIPS64::VisitVecNot(HVecNot* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecNot(HVecNot* instruction) {
@@ -285,8 +285,8 @@
}
// Helper to set up locations for vector binary operations.
-static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -308,7 +308,7 @@
}
void LocationsBuilderMIPS64::VisitVecAdd(HVecAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecAdd(HVecAdd* instruction) {
@@ -350,7 +350,7 @@
}
void LocationsBuilderMIPS64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
@@ -390,7 +390,7 @@
}
void LocationsBuilderMIPS64::VisitVecSub(HVecSub* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecSub(HVecSub* instruction) {
@@ -432,7 +432,7 @@
}
void LocationsBuilderMIPS64::VisitVecMul(HVecMul* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecMul(HVecMul* instruction) {
@@ -474,7 +474,7 @@
}
void LocationsBuilderMIPS64::VisitVecDiv(HVecDiv* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecDiv(HVecDiv* instruction) {
@@ -498,7 +498,7 @@
}
void LocationsBuilderMIPS64::VisitVecMin(HVecMin* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecMin(HVecMin* instruction) {
@@ -558,7 +558,7 @@
}
void LocationsBuilderMIPS64::VisitVecMax(HVecMax* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecMax(HVecMax* instruction) {
@@ -618,7 +618,7 @@
}
void LocationsBuilderMIPS64::VisitVecAnd(HVecAnd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecAnd(HVecAnd* instruction) {
@@ -647,7 +647,7 @@
}
void LocationsBuilderMIPS64::VisitVecAndNot(HVecAndNot* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecAndNot(HVecAndNot* instruction) {
@@ -655,7 +655,7 @@
}
void LocationsBuilderMIPS64::VisitVecOr(HVecOr* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecOr(HVecOr* instruction) {
@@ -684,7 +684,7 @@
}
void LocationsBuilderMIPS64::VisitVecXor(HVecXor* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecXor(HVecXor* instruction) {
@@ -713,8 +713,8 @@
}
// Helper to set up locations for vector shift operations.
-static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint8:
case DataType::Type::kInt8:
@@ -733,7 +733,7 @@
}
void LocationsBuilderMIPS64::VisitVecShl(HVecShl* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecShl(HVecShl* instruction) {
@@ -767,7 +767,7 @@
}
void LocationsBuilderMIPS64::VisitVecShr(HVecShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecShr(HVecShr* instruction) {
@@ -801,7 +801,7 @@
}
void LocationsBuilderMIPS64::VisitVecUShr(HVecUShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecUShr(HVecUShr* instruction) {
@@ -843,8 +843,8 @@
}
// Helper to set up locations for vector accumulations.
-static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint8:
case DataType::Type::kInt8:
@@ -864,7 +864,7 @@
}
void LocationsBuilderMIPS64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
@@ -914,7 +914,7 @@
}
void LocationsBuilderMIPS64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
@@ -923,10 +923,10 @@
}
// Helper to set up locations for vector memory operations.
-static void CreateVecMemLocations(ArenaAllocator* arena,
+static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
bool is_load) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -984,7 +984,7 @@
}
void LocationsBuilderMIPS64::VisitVecLoad(HVecLoad* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /* is_load */ true);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ true);
}
void InstructionCodeGeneratorMIPS64::VisitVecLoad(HVecLoad* instruction) {
@@ -1027,7 +1027,7 @@
}
void LocationsBuilderMIPS64::VisitVecStore(HVecStore* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /* is_load */ false);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ false);
}
void InstructionCodeGeneratorMIPS64::VisitVecStore(HVecStore* instruction) {
diff --git a/compiler/optimizing/code_generator_vector_x86.cc b/compiler/optimizing/code_generator_vector_x86.cc
index a2ef1b1..ad8128a 100644
--- a/compiler/optimizing/code_generator_vector_x86.cc
+++ b/compiler/optimizing/code_generator_vector_x86.cc
@@ -26,7 +26,7 @@
#define __ down_cast<X86Assembler*>(GetAssembler())-> // NOLINT
void LocationsBuilderX86::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
HInstruction* input = instruction->InputAt(0);
bool is_zero = IsZeroBitPattern(input);
switch (instruction->GetPackedType()) {
@@ -117,7 +117,7 @@
}
void LocationsBuilderX86::VisitVecExtractScalar(HVecExtractScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kInt64:
// Long needs extra temporary to store into the register pair.
@@ -180,8 +180,8 @@
}
// Helper to set up locations for vector unary operations.
-static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -202,7 +202,7 @@
}
void LocationsBuilderX86::VisitVecReduce(HVecReduce* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
// Long reduction or min/max require a temporary.
if (instruction->GetPackedType() == DataType::Type::kInt64 ||
instruction->GetKind() == HVecReduce::kMin ||
@@ -269,7 +269,7 @@
}
void LocationsBuilderX86::VisitVecCnv(HVecCnv* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecCnv(HVecCnv* instruction) {
@@ -287,7 +287,7 @@
}
void LocationsBuilderX86::VisitVecNeg(HVecNeg* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecNeg(HVecNeg* instruction) {
@@ -334,7 +334,7 @@
}
void LocationsBuilderX86::VisitVecAbs(HVecAbs* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
// Integral-abs requires a temporary for the comparison.
if (instruction->GetPackedType() == DataType::Type::kInt32) {
instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
@@ -375,7 +375,7 @@
}
void LocationsBuilderX86::VisitVecNot(HVecNot* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
// Boolean-not requires a temporary to construct the 16 x one.
if (instruction->GetPackedType() == DataType::Type::kBool) {
instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
@@ -424,8 +424,8 @@
}
// Helper to set up locations for vector binary operations.
-static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -447,7 +447,7 @@
}
void LocationsBuilderX86::VisitVecAdd(HVecAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecAdd(HVecAdd* instruction) {
@@ -489,7 +489,7 @@
}
void LocationsBuilderX86::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
@@ -516,7 +516,7 @@
}
void LocationsBuilderX86::VisitVecSub(HVecSub* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecSub(HVecSub* instruction) {
@@ -558,7 +558,7 @@
}
void LocationsBuilderX86::VisitVecMul(HVecMul* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecMul(HVecMul* instruction) {
@@ -591,7 +591,7 @@
}
void LocationsBuilderX86::VisitVecDiv(HVecDiv* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecDiv(HVecDiv* instruction) {
@@ -615,7 +615,7 @@
}
void LocationsBuilderX86::VisitVecMin(HVecMin* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecMin(HVecMin* instruction) {
@@ -666,7 +666,7 @@
}
void LocationsBuilderX86::VisitVecMax(HVecMax* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecMax(HVecMax* instruction) {
@@ -717,7 +717,7 @@
}
void LocationsBuilderX86::VisitVecAnd(HVecAnd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecAnd(HVecAnd* instruction) {
@@ -752,7 +752,7 @@
}
void LocationsBuilderX86::VisitVecAndNot(HVecAndNot* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecAndNot(HVecAndNot* instruction) {
@@ -787,7 +787,7 @@
}
void LocationsBuilderX86::VisitVecOr(HVecOr* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecOr(HVecOr* instruction) {
@@ -822,7 +822,7 @@
}
void LocationsBuilderX86::VisitVecXor(HVecXor* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecXor(HVecXor* instruction) {
@@ -857,8 +857,8 @@
}
// Helper to set up locations for vector shift operations.
-static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -875,7 +875,7 @@
}
void LocationsBuilderX86::VisitVecShl(HVecShl* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecShl(HVecShl* instruction) {
@@ -904,7 +904,7 @@
}
void LocationsBuilderX86::VisitVecShr(HVecShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecShr(HVecShr* instruction) {
@@ -929,7 +929,7 @@
}
void LocationsBuilderX86::VisitVecUShr(HVecUShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecUShr(HVecUShr* instruction) {
@@ -958,7 +958,7 @@
}
void LocationsBuilderX86::VisitVecSetScalars(HVecSetScalars* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented
@@ -1045,8 +1045,8 @@
}
// Helper to set up locations for vector accumulations.
-static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint8:
case DataType::Type::kInt8:
@@ -1066,7 +1066,7 @@
}
void LocationsBuilderX86::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
@@ -1075,7 +1075,7 @@
}
void LocationsBuilderX86::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
@@ -1084,10 +1084,10 @@
}
// Helper to set up locations for vector memory operations.
-static void CreateVecMemLocations(ArenaAllocator* arena,
+static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
bool is_load) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -1131,7 +1131,7 @@
}
void LocationsBuilderX86::VisitVecLoad(HVecLoad* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ true);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ true);
// String load requires a temporary for the compressed load.
if (mirror::kUseStringCompression && instruction->IsStringCharAt()) {
instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
@@ -1194,7 +1194,7 @@
}
void LocationsBuilderX86::VisitVecStore(HVecStore* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ false);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ false);
}
void InstructionCodeGeneratorX86::VisitVecStore(HVecStore* instruction) {
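Every hunk above follows the same pattern: LocationSummary objects are placement-new'd on the graph's arena, so `GetGraph()->GetArena()` callers become `GetGraph()->GetAllocator()` callers with no change in lifetime semantics. A minimal, self-contained sketch of how such a placement-new overload works (illustrative names only; this is not ART's actual ArenaAllocator or ArenaObject):

    #include <cstddef>
    #include <memory>
    #include <new>

    // Illustrative bump-pointer arena. Objects are carved out of one buffer
    // and never freed individually; the whole buffer is released when the
    // arena is destroyed.
    class ArenaAllocator {
     public:
      explicit ArenaAllocator(size_t capacity)
          : buffer_(new char[capacity]), capacity_(capacity) {}

      void* Alloc(size_t bytes) {
        bytes = (bytes + 15) & ~size_t{15};  // Keep allocations 16-byte aligned.
        if (used_ + bytes > capacity_) throw std::bad_alloc();
        void* result = buffer_.get() + used_;
        used_ += bytes;
        return result;
      }

     private:
      std::unique_ptr<char[]> buffer_;
      size_t capacity_;
      size_t used_ = 0;
    };

    // Base class supplying the placement-new overload that makes
    // `new (allocator) LocationSummary(...)` legal.
    struct ArenaObject {
      static void* operator new(size_t size, ArenaAllocator* allocator) {
        return allocator->Alloc(size);
      }
      // Matching placement delete; only invoked if a constructor throws.
      static void operator delete(void*, ArenaAllocator*) {}
    };

    struct LocationSummary : ArenaObject {
      explicit LocationSummary(int id) : id_(id) {}
      int id_;
    };

    int main() {
      ArenaAllocator arena(1 << 12);
      LocationSummary* locations = new (&arena) LocationSummary(42);
      // No `delete locations`: the arena reclaims everything at once.
      return locations->id_ == 42 ? 0 : 1;
    }
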
diff --git a/compiler/optimizing/code_generator_vector_x86_64.cc b/compiler/optimizing/code_generator_vector_x86_64.cc
index 2270f6b..107030e 100644
--- a/compiler/optimizing/code_generator_vector_x86_64.cc
+++ b/compiler/optimizing/code_generator_vector_x86_64.cc
@@ -26,7 +26,7 @@
#define __ down_cast<X86_64Assembler*>(GetAssembler())-> // NOLINT
void LocationsBuilderX86_64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
HInstruction* input = instruction->InputAt(0);
bool is_zero = IsZeroBitPattern(input);
switch (instruction->GetPackedType()) {
@@ -108,7 +108,7 @@
}
void LocationsBuilderX86_64::VisitVecExtractScalar(HVecExtractScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -163,8 +163,8 @@
}
// Helper to set up locations for vector unary operations.
-static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -185,7 +185,7 @@
}
void LocationsBuilderX86_64::VisitVecReduce(HVecReduce* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
// Long reduction or min/max require a temporary.
if (instruction->GetPackedType() == DataType::Type::kInt64 ||
instruction->GetKind() == HVecReduce::kMin ||
@@ -252,7 +252,7 @@
}
void LocationsBuilderX86_64::VisitVecCnv(HVecCnv* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecCnv(HVecCnv* instruction) {
@@ -270,7 +270,7 @@
}
void LocationsBuilderX86_64::VisitVecNeg(HVecNeg* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecNeg(HVecNeg* instruction) {
@@ -317,7 +317,7 @@
}
void LocationsBuilderX86_64::VisitVecAbs(HVecAbs* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
// Integral-abs requires a temporary for the comparison.
if (instruction->GetPackedType() == DataType::Type::kInt32) {
instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
@@ -358,7 +358,7 @@
}
void LocationsBuilderX86_64::VisitVecNot(HVecNot* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
// Boolean-not requires a temporary to construct the 16 x one.
if (instruction->GetPackedType() == DataType::Type::kBool) {
instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
@@ -407,8 +407,8 @@
}
// Helper to set up locations for vector binary operations.
-static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -430,7 +430,7 @@
}
void LocationsBuilderX86_64::VisitVecAdd(HVecAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecAdd(HVecAdd* instruction) {
@@ -472,7 +472,7 @@
}
void LocationsBuilderX86_64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
@@ -499,7 +499,7 @@
}
void LocationsBuilderX86_64::VisitVecSub(HVecSub* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecSub(HVecSub* instruction) {
@@ -541,7 +541,7 @@
}
void LocationsBuilderX86_64::VisitVecMul(HVecMul* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecMul(HVecMul* instruction) {
@@ -574,7 +574,7 @@
}
void LocationsBuilderX86_64::VisitVecDiv(HVecDiv* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecDiv(HVecDiv* instruction) {
@@ -598,7 +598,7 @@
}
void LocationsBuilderX86_64::VisitVecMin(HVecMin* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecMin(HVecMin* instruction) {
@@ -649,7 +649,7 @@
}
void LocationsBuilderX86_64::VisitVecMax(HVecMax* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecMax(HVecMax* instruction) {
@@ -700,7 +700,7 @@
}
void LocationsBuilderX86_64::VisitVecAnd(HVecAnd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecAnd(HVecAnd* instruction) {
@@ -735,7 +735,7 @@
}
void LocationsBuilderX86_64::VisitVecAndNot(HVecAndNot* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecAndNot(HVecAndNot* instruction) {
@@ -770,7 +770,7 @@
}
void LocationsBuilderX86_64::VisitVecOr(HVecOr* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecOr(HVecOr* instruction) {
@@ -805,7 +805,7 @@
}
void LocationsBuilderX86_64::VisitVecXor(HVecXor* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecXor(HVecXor* instruction) {
@@ -840,8 +840,8 @@
}
// Helper to set up locations for vector shift operations.
-static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -858,7 +858,7 @@
}
void LocationsBuilderX86_64::VisitVecShl(HVecShl* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecShl(HVecShl* instruction) {
@@ -887,7 +887,7 @@
}
void LocationsBuilderX86_64::VisitVecShr(HVecShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecShr(HVecShr* instruction) {
@@ -912,7 +912,7 @@
}
void LocationsBuilderX86_64::VisitVecUShr(HVecUShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecUShr(HVecUShr* instruction) {
@@ -941,7 +941,7 @@
}
void LocationsBuilderX86_64::VisitVecSetScalars(HVecSetScalars* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented
@@ -1018,8 +1018,8 @@
}
// Helper to set up locations for vector accumulations.
-static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint8:
case DataType::Type::kInt8:
@@ -1039,7 +1039,7 @@
}
void LocationsBuilderX86_64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
@@ -1048,7 +1048,7 @@
}
void LocationsBuilderX86_64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
@@ -1057,10 +1057,10 @@
}
// Helper to set up locations for vector memory operations.
-static void CreateVecMemLocations(ArenaAllocator* arena,
+static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
bool is_load) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -1104,7 +1104,7 @@
}
void LocationsBuilderX86_64::VisitVecLoad(HVecLoad* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ true);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ true);
// String load requires a temporary for the compressed load.
if (mirror::kUseStringCompression && instruction->IsStringCharAt()) {
instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
@@ -1167,7 +1167,7 @@
}
void LocationsBuilderX86_64::VisitVecStore(HVecStore* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ false);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ false);
}
void InstructionCodeGeneratorX86_64::VisitVecStore(HVecStore* instruction) {
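Besides placement new, the allocator is also threaded into standard containers: the CodeGeneratorX86 constructor in the next file initializes its patch tables with `graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)`. A minimal STL-compatible adapter in that spirit (the allocation-kind tag is dropped and all names are illustrative, not ART's exact API):

    #include <cstddef>
    #include <new>
    #include <vector>

    // Compact bump arena, as in the earlier sketch.
    class ArenaAllocator {
     public:
      void* Alloc(size_t bytes) {
        bytes = (bytes + 15) & ~size_t{15};
        if (used_ + bytes > sizeof(buffer_)) throw std::bad_alloc();
        void* result = buffer_ + used_;
        used_ += bytes;
        return result;
      }
     private:
      alignas(16) char buffer_[1 << 16];
      size_t used_ = 0;
    };

    // Minimal allocator adapter so standard containers draw from the arena.
    // ART's Adapter() additionally takes an ArenaAllocKind tag for memory
    // accounting (e.g. kArenaAllocCodeGenerator); that bookkeeping is omitted.
    template <typename T>
    class ArenaAdapter {
     public:
      using value_type = T;

      explicit ArenaAdapter(ArenaAllocator* arena) : arena_(arena) {}
      template <typename U>
      ArenaAdapter(const ArenaAdapter<U>& other) : arena_(other.arena_) {}

      T* allocate(size_t n) {
        return static_cast<T*>(arena_->Alloc(n * sizeof(T)));
      }
      void deallocate(T*, size_t) {}  // Memory is reclaimed in bulk.

      ArenaAllocator* arena_;
    };

    template <typename T, typename U>
    bool operator==(const ArenaAdapter<T>& a, const ArenaAdapter<U>& b) {
      return a.arena_ == b.arena_;
    }
    template <typename T, typename U>
    bool operator!=(const ArenaAdapter<T>& a, const ArenaAdapter<U>& b) {
      return !(a == b);
    }

    int main() {
      static ArenaAllocator arena;  // Static: the backing buffer is 64 KiB.
      ArenaAdapter<int> adapter(&arena);
      std::vector<int, ArenaAdapter<int>> patches(adapter);
      patches.push_back(1);
      patches.push_back(2);
      return patches.size() == 2 ? 0 : 1;
    }
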
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 3515649..d8a47fa 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -414,7 +414,7 @@
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(
locations->InAt(0),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
@@ -811,7 +811,7 @@
// We're moving two or three locations to locations that could
// overlap, so we need a parallel move resolver.
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(ref_,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
DataType::Type::kReference,
@@ -1030,21 +1030,21 @@
block_labels_(nullptr),
location_builder_(graph, this),
instruction_visitor_(graph, this),
- move_resolver_(graph->GetArena(), this),
- assembler_(graph->GetArena()),
+ move_resolver_(graph->GetAllocator(), this),
+ assembler_(graph->GetAllocator()),
isa_features_(isa_features),
- boot_image_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- boot_image_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jit_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jit_class_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
constant_area_start_(-1),
- fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ fixups_to_jump_tables_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
method_address_offset_(std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
// Use a fake return address register to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
}
@@ -1333,7 +1333,7 @@
}
void CodeGeneratorX86::MoveLocation(Location dst, Location src, DataType::Type dst_type) {
- HParallelMove move(GetGraph()->GetArena());
+ HParallelMove move(GetGraph()->GetAllocator());
if (dst_type == DataType::Type::kInt64 && !src.IsConstant() && !src.IsFpuRegister()) {
move.AddMove(src.ToLow(), dst.ToLow(), DataType::Type::kInt32, nullptr);
move.AddMove(src.ToHigh(), dst.ToHigh(), DataType::Type::kInt32, nullptr);
@@ -1681,7 +1681,7 @@
}
void LocationsBuilderX86::VisitIf(HIf* if_instr) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr);
if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
locations->SetInAt(0, Location::Any());
}
@@ -1698,7 +1698,7 @@
}
void LocationsBuilderX86::VisitDeoptimize(HDeoptimize* deoptimize) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
InvokeRuntimeCallingConvention calling_convention;
RegisterSet caller_saves = RegisterSet::Empty();
@@ -1718,7 +1718,7 @@
}
void LocationsBuilderX86::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(flag, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -1750,7 +1750,7 @@
}
void LocationsBuilderX86::VisitSelect(HSelect* select) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select);
if (DataType::IsFloatingPointType(select->GetType())) {
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::Any());
@@ -1844,7 +1844,7 @@
}
void LocationsBuilderX86::VisitNativeDebugInfo(HNativeDebugInfo* info) {
- new (GetGraph()->GetArena()) LocationSummary(info);
+ new (GetGraph()->GetAllocator()) LocationSummary(info);
}
void InstructionCodeGeneratorX86::VisitNativeDebugInfo(HNativeDebugInfo*) {
@@ -1857,7 +1857,7 @@
void LocationsBuilderX86::HandleCondition(HCondition* cond) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(cond, LocationSummary::kNoCall);
// Handle the long/FP comparisons made in instruction simplification.
switch (cond->InputAt(0)->GetType()) {
case DataType::Type::kInt64: {
@@ -2024,7 +2024,7 @@
void LocationsBuilderX86::VisitIntConstant(HIntConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2034,7 +2034,7 @@
void LocationsBuilderX86::VisitNullConstant(HNullConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2044,7 +2044,7 @@
void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2054,7 +2054,7 @@
void LocationsBuilderX86::VisitFloatConstant(HFloatConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2064,7 +2064,7 @@
void LocationsBuilderX86::VisitDoubleConstant(HDoubleConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2099,7 +2099,7 @@
void LocationsBuilderX86::VisitReturn(HReturn* ret) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(ret, LocationSummary::kNoCall);
switch (ret->InputAt(0)->GetType()) {
case DataType::Type::kReference:
case DataType::Type::kBool:
@@ -2300,7 +2300,7 @@
void LocationsBuilderX86::VisitNeg(HNeg* neg) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
switch (neg->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -2381,7 +2381,7 @@
void LocationsBuilderX86::VisitX86FPNeg(HX86FPNeg* neg) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
DCHECK(DataType::IsFloatingPointType(neg->GetType()));
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -2423,7 +2423,7 @@
? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
+ new (GetGraph()->GetAllocator()) LocationSummary(conversion, call_kind);
switch (result_type) {
case DataType::Type::kUint8:
@@ -2921,7 +2921,7 @@
void LocationsBuilderX86::VisitAdd(HAdd* add) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(add, LocationSummary::kNoCall);
switch (add->GetResultType()) {
case DataType::Type::kInt32: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -3048,7 +3048,7 @@
void LocationsBuilderX86::VisitSub(HSub* sub) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(sub, LocationSummary::kNoCall);
switch (sub->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64: {
@@ -3154,7 +3154,7 @@
void LocationsBuilderX86::VisitMul(HMul* mul) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall);
switch (mul->GetResultType()) {
case DataType::Type::kInt32:
locations->SetInAt(0, Location::RequiresRegister());
@@ -3581,7 +3581,7 @@
GenerateDivRemWithAnyConstant(instruction);
}
} else {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) DivRemMinusOneSlowPathX86(
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) DivRemMinusOneSlowPathX86(
instruction, out.AsRegister<Register>(), is_div);
codegen_->AddSlowPath(slow_path);
@@ -3630,7 +3630,7 @@
LocationSummary::CallKind call_kind = (div->GetResultType() == DataType::Type::kInt64)
? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(div, call_kind);
switch (div->GetResultType()) {
case DataType::Type::kInt32: {
@@ -3735,7 +3735,7 @@
LocationSummary::CallKind call_kind = (rem->GetResultType() == DataType::Type::kInt64)
? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(rem, call_kind);
switch (type) {
case DataType::Type::kInt32: {
@@ -3817,7 +3817,7 @@
}
void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathX86(instruction);
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathX86(instruction);
codegen_->AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -3867,7 +3867,7 @@
DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(op, LocationSummary::kNoCall);
switch (op->GetResultType()) {
case DataType::Type::kInt32:
@@ -4062,7 +4062,7 @@
void LocationsBuilderX86::VisitRor(HRor* ror) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(ror, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(ror, LocationSummary::kNoCall);
switch (ror->GetResultType()) {
case DataType::Type::kInt64:
@@ -4170,8 +4170,8 @@
}
void LocationsBuilderX86::VisitNewInstance(HNewInstance* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
locations->SetOut(Location::RegisterLocation(EAX));
if (instruction->IsStringAlloc()) {
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
@@ -4199,8 +4199,8 @@
}
void LocationsBuilderX86::VisitNewArray(HNewArray* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
locations->SetOut(Location::RegisterLocation(EAX));
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -4219,7 +4219,7 @@
void LocationsBuilderX86::VisitParameterValue(HParameterValue* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
if (location.IsStackSlot()) {
location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
@@ -4235,7 +4235,7 @@
void LocationsBuilderX86::VisitCurrentMethod(HCurrentMethod* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}
@@ -4244,7 +4244,7 @@
void LocationsBuilderX86::VisitClassTableGet(HClassTableGet* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
@@ -4270,7 +4270,7 @@
void LocationsBuilderX86::VisitNot(HNot* not_) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(not_, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
@@ -4297,7 +4297,7 @@
void LocationsBuilderX86::VisitBooleanNot(HBooleanNot* bool_not) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(bool_not, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
@@ -4312,7 +4312,7 @@
void LocationsBuilderX86::VisitCompare(HCompare* compare) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(compare, LocationSummary::kNoCall);
switch (compare->InputAt(0)->GetType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -4431,7 +4431,7 @@
void LocationsBuilderX86::VisitPhi(HPhi* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
locations->SetInAt(i, Location::Any());
}
@@ -4714,10 +4714,10 @@
bool object_field_get_with_read_barrier =
kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- kEmitCompilerReadBarrier ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ kEmitCompilerReadBarrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -4862,7 +4862,7 @@
DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
bool is_volatile = field_info.IsVolatile();
DataType::Type field_type = field_info.GetFieldType();
@@ -5149,7 +5149,7 @@
}
void CodeGeneratorX86::GenerateExplicitNullCheck(HNullCheck* instruction) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathX86(instruction);
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) NullCheckSlowPathX86(instruction);
AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -5176,10 +5176,10 @@
bool object_array_get_with_read_barrier =
kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_array_get_with_read_barrier ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_array_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -5332,7 +5332,7 @@
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
may_need_runtime_call_for_type_check ?
LocationSummary::kCallOnSlowPath :
@@ -5427,7 +5427,7 @@
Location temp_loc = locations->GetTemp(0);
Register temp = temp_loc.AsRegister<Register>();
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathX86(instruction);
+ slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathX86(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
__ testl(register_value, register_value);
@@ -5570,7 +5570,7 @@
}
void LocationsBuilderX86::VisitArrayLength(HArrayLength* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
if (!instruction->IsEmittedAtUseSite()) {
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -5618,7 +5618,7 @@
Location index_loc = locations->InAt(0);
Location length_loc = locations->InAt(1);
SlowPathCode* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathX86(instruction);
+ new (GetGraph()->GetAllocator()) BoundsCheckSlowPathX86(instruction);
if (length_loc.IsConstant()) {
int32_t length = CodeGenerator::GetInt32ValueOf(length_loc.GetConstant());
@@ -5684,8 +5684,8 @@
}
void LocationsBuilderX86::VisitSuspendCheck(HSuspendCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
// In suspend check slow path, usually there are no caller-save registers at all.
// If SIMD instructions are present, however, we force spilling all live SIMD
// registers in full width (since the runtime only saves/restores lower part).
@@ -5712,7 +5712,7 @@
SuspendCheckSlowPathX86* slow_path =
down_cast<SuspendCheckSlowPathX86*>(instruction->GetSlowPath());
if (slow_path == nullptr) {
- slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathX86(instruction, successor);
+ slow_path = new (GetGraph()->GetAllocator()) SuspendCheckSlowPathX86(instruction, successor);
instruction->SetSlowPath(slow_path);
codegen_->AddSlowPath(slow_path);
if (successor != nullptr) {
@@ -6044,7 +6044,7 @@
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -6165,7 +6165,7 @@
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86(
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
codegen_->AddSlowPath(slow_path);
@@ -6184,7 +6184,7 @@
void LocationsBuilderX86::VisitClinitCheck(HClinitCheck* check) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
locations->SetInAt(0, Location::RequiresRegister());
if (check->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -6193,7 +6193,7 @@
void InstructionCodeGeneratorX86::VisitClinitCheck(HClinitCheck* check) {
// We assume the class to not be null.
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86(
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86(
check->GetLoadClass(), check, check->GetDexPc(), true);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path,
@@ -6229,7 +6229,7 @@
void LocationsBuilderX86::VisitLoadString(HLoadString* load) {
LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
HLoadString::LoadKind load_kind = load->GetLoadKind();
if (load_kind == HLoadString::LoadKind::kBootImageLinkTimePcRelative ||
load_kind == HLoadString::LoadKind::kBootImageInternTable ||
@@ -6300,7 +6300,7 @@
Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
// /* GcRoot<mirror::String> */ out = *address /* PC-relative */
GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86(load);
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadStringSlowPathX86(load);
codegen_->AddSlowPath(slow_path);
__ testl(out, out);
__ j(kEqual, slow_path->GetEntryLabel());
@@ -6333,7 +6333,7 @@
void LocationsBuilderX86::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -6342,7 +6342,7 @@
}
void LocationsBuilderX86::VisitClearException(HClearException* clear) {
- new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall);
}
void InstructionCodeGeneratorX86::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
@@ -6350,8 +6350,8 @@
}
void LocationsBuilderX86::VisitThrow(HThrow* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}
@@ -6403,7 +6403,8 @@
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -6580,8 +6581,8 @@
__ cmpl(out, Address(ESP, cls.GetStackIndex()));
}
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -6612,8 +6613,8 @@
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
@@ -6661,7 +6662,8 @@
IsTypeCheckSlowPathFatal(type_check_kind, throws_into_catch)
? LocationSummary::kNoCall
: LocationSummary::kCallOnSlowPath;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
if (type_check_kind == TypeCheckKind::kInterfaceCheck) {
// Require a register for the interface check since there is a loop that compares the class to
@@ -6704,8 +6706,8 @@
IsTypeCheckSlowPathFatal(type_check_kind, instruction->CanThrowIntoCatchBlock());
SlowPathCode* type_check_slow_path =
- new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction,
- is_type_check_slow_path_fatal);
+ new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86(instruction,
+ is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
NearLabel done;
@@ -6902,8 +6904,8 @@
}
void LocationsBuilderX86::VisitMonitorOperation(HMonitorOperation* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}
@@ -6926,7 +6928,7 @@
void LocationsBuilderX86::HandleBitwiseOperation(HBinaryOperation* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
DCHECK(instruction->GetResultType() == DataType::Type::kInt32
|| instruction->GetResultType() == DataType::Type::kInt64);
locations->SetInAt(0, Location::RequiresRegister());
@@ -7148,7 +7150,7 @@
"have different sizes.");
// Slow path marking the GC root `root`.
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86(
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86(
instruction, root, /* unpoison_ref_before_marking */ false);
codegen_->AddSlowPath(slow_path);
@@ -7278,10 +7280,10 @@
SlowPathCode* slow_path;
if (always_update_field) {
DCHECK(temp != nullptr);
- slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkAndUpdateFieldSlowPathX86(
+ slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86(
instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp);
} else {
- slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86(
+ slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86(
instruction, ref, /* unpoison_ref_before_marking */ true);
}
AddSlowPath(slow_path);
@@ -7314,7 +7316,7 @@
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCode* slow_path = new (GetGraph()->GetArena())
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator())
ReadBarrierForHeapReferenceSlowPathX86(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -7350,7 +7352,7 @@
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCode* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathX86(instruction, out, root);
+ new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathX86(instruction, out, root);
AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
@@ -7370,7 +7372,7 @@
// Simple implementation of packed switch - generate cascaded compare/jumps.
void LocationsBuilderX86::VisitPackedSwitch(HPackedSwitch* switch_instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -7437,7 +7439,7 @@
void LocationsBuilderX86::VisitX86PackedSwitch(HX86PackedSwitch* switch_instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
// Constant area pointer.
@@ -7492,7 +7494,7 @@
void LocationsBuilderX86::VisitX86ComputeBaseMethodAddress(
HX86ComputeBaseMethodAddress* insn) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(insn, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(insn, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -7516,7 +7518,7 @@
void LocationsBuilderX86::VisitX86LoadFromConstantTable(
HX86LoadFromConstantTable* insn) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(insn, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(insn, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::ConstantLocation(insn->GetConstant()));
@@ -7676,28 +7678,31 @@
HX86ComputeBaseMethodAddress* method_base,
Register reg) {
AssemblerFixup* fixup =
- new (GetGraph()->GetArena()) RIPFixup(*this, method_base, __ AddDouble(v));
+ new (GetGraph()->GetAllocator()) RIPFixup(*this, method_base, __ AddDouble(v));
return Address(reg, kDummy32BitOffset, fixup);
}
Address CodeGeneratorX86::LiteralFloatAddress(float v,
HX86ComputeBaseMethodAddress* method_base,
Register reg) {
- AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, method_base, __ AddFloat(v));
+ AssemblerFixup* fixup =
+ new (GetGraph()->GetAllocator()) RIPFixup(*this, method_base, __ AddFloat(v));
return Address(reg, kDummy32BitOffset, fixup);
}
Address CodeGeneratorX86::LiteralInt32Address(int32_t v,
HX86ComputeBaseMethodAddress* method_base,
Register reg) {
- AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, method_base, __ AddInt32(v));
+ AssemblerFixup* fixup =
+ new (GetGraph()->GetAllocator()) RIPFixup(*this, method_base, __ AddInt32(v));
return Address(reg, kDummy32BitOffset, fixup);
}
Address CodeGeneratorX86::LiteralInt64Address(int64_t v,
HX86ComputeBaseMethodAddress* method_base,
Register reg) {
- AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, method_base, __ AddInt64(v));
+ AssemblerFixup* fixup =
+ new (GetGraph()->GetAllocator()) RIPFixup(*this, method_base, __ AddInt64(v));
return Address(reg, kDummy32BitOffset, fixup);
}
@@ -7747,7 +7752,7 @@
Register value) {
// Create a fixup to be used to create and address the jump table.
JumpTableRIPFixup* table_fixup =
- new (GetGraph()->GetArena()) JumpTableRIPFixup(*this, switch_instr);
+ new (GetGraph()->GetAllocator()) JumpTableRIPFixup(*this, switch_instr);
// We have to populate the jump tables.
fixups_to_jump_tables_.push_back(table_fixup);
@@ -7773,13 +7778,13 @@
// TODO: Consider pairs in the parallel move resolver, then this could be nicely merged
// with the else branch.
if (type == DataType::Type::kInt64) {
- HParallelMove parallel_move(GetGraph()->GetArena());
+ HParallelMove parallel_move(GetGraph()->GetAllocator());
parallel_move.AddMove(return_loc.ToLow(), target.ToLow(), DataType::Type::kInt32, nullptr);
parallel_move.AddMove(return_loc.ToHigh(), target.ToHigh(), DataType::Type::kInt32, nullptr);
GetMoveResolver()->EmitNativeCode(&parallel_move);
} else {
// Let the parallel move resolver take care of all of this.
- HParallelMove parallel_move(GetGraph()->GetArena());
+ HParallelMove parallel_move(GetGraph()->GetAllocator());
parallel_move.AddMove(return_loc, target, type, nullptr);
GetMoveResolver()->EmitNativeCode(&parallel_move);
}
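Both code generators allocate their slow paths the same way: placement new on the graph's allocator followed by `AddSlowPath(...)`, with no matching delete. A sketch of that registration shape under the same illustrative arena (class names are stand-ins, not ART's actual hierarchy):

    #include <cstddef>
    #include <new>
    #include <vector>

    // Compact bump arena, as in the earlier sketches.
    class ArenaAllocator {
     public:
      void* Alloc(size_t bytes) {
        bytes = (bytes + 15) & ~size_t{15};
        if (used_ + bytes > sizeof(buffer_)) throw std::bad_alloc();
        void* result = buffer_ + used_;
        used_ += bytes;
        return result;
      }
     private:
      alignas(16) char buffer_[1 << 16];
      size_t used_ = 0;
    };

    // Slow paths are placement-new'd on the arena, registered with the code
    // generator, and emitted after the fast-path code. The arena releases
    // their memory in bulk without running destructors, so they should hold
    // only trivially destructible state.
    class SlowPathCode {
     public:
      static void* operator new(size_t size, ArenaAllocator* allocator) {
        return allocator->Alloc(size);
      }
      static void operator delete(void*, ArenaAllocator*) {}
      virtual void EmitNativeCode() = 0;
    };

    class NullCheckSlowPath : public SlowPathCode {
     public:
      void EmitNativeCode() override { /* emit the runtime call here */ }
    };

    class CodeGenerator {
     public:
      explicit CodeGenerator(ArenaAllocator* allocator) : allocator_(allocator) {}
      ArenaAllocator* GetAllocator() { return allocator_; }
      void AddSlowPath(SlowPathCode* path) { slow_paths_.push_back(path); }
      void EmitSlowPaths() {
        for (SlowPathCode* path : slow_paths_) {
          path->EmitNativeCode();
        }
        // No per-object delete: the arena owns every slow path.
      }
     private:
      ArenaAllocator* allocator_;
      std::vector<SlowPathCode*> slow_paths_;
    };

    int main() {
      static ArenaAllocator arena;
      CodeGenerator codegen(&arena);
      codegen.AddSlowPath(new (codegen.GetAllocator()) NullCheckSlowPath());
      codegen.EmitSlowPaths();
      return 0;
    }
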
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index e8bfa66..b6aa110 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -427,7 +427,7 @@
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(
locations->InAt(0),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
@@ -831,7 +831,7 @@
// We're moving two or three locations to locations that could
// overlap, so we need a parallel move resolver.
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(ref_,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
DataType::Type::kReference,
@@ -1230,19 +1230,19 @@
block_labels_(nullptr),
location_builder_(graph, this),
instruction_visitor_(graph, this),
- move_resolver_(graph->GetArena(), this),
- assembler_(graph->GetArena()),
+ move_resolver_(graph->GetAllocator(), this),
+ assembler_(graph->GetAllocator()),
isa_features_(isa_features),
constant_area_start_(0),
- boot_image_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- boot_image_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ boot_image_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jit_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jit_class_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ fixups_to_jump_tables_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
}
@@ -1702,7 +1702,7 @@
}
void LocationsBuilderX86_64::VisitIf(HIf* if_instr) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr);
if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
locations->SetInAt(0, Location::Any());
}
@@ -1719,7 +1719,7 @@
}
void LocationsBuilderX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
InvokeRuntimeCallingConvention calling_convention;
RegisterSet caller_saves = RegisterSet::Empty();
@@ -1739,7 +1739,7 @@
}
void LocationsBuilderX86_64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(flag, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -1767,7 +1767,7 @@
}
void LocationsBuilderX86_64::VisitSelect(HSelect* select) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select);
if (DataType::IsFloatingPointType(select->GetType())) {
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::Any());
@@ -1847,7 +1847,7 @@
}
void LocationsBuilderX86_64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
- new (GetGraph()->GetArena()) LocationSummary(info);
+ new (GetGraph()->GetAllocator()) LocationSummary(info);
}
void InstructionCodeGeneratorX86_64::VisitNativeDebugInfo(HNativeDebugInfo*) {
@@ -1860,7 +1860,7 @@
void LocationsBuilderX86_64::HandleCondition(HCondition* cond) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(cond, LocationSummary::kNoCall);
// Handle the long/FP comparisons made in instruction simplification.
switch (cond->InputAt(0)->GetType()) {
case DataType::Type::kInt64:
@@ -2034,7 +2034,7 @@
void LocationsBuilderX86_64::VisitCompare(HCompare* compare) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(compare, LocationSummary::kNoCall);
switch (compare->InputAt(0)->GetType()) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -2132,7 +2132,7 @@
void LocationsBuilderX86_64::VisitIntConstant(HIntConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2142,7 +2142,7 @@
void LocationsBuilderX86_64::VisitNullConstant(HNullConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2152,7 +2152,7 @@
void LocationsBuilderX86_64::VisitLongConstant(HLongConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2162,7 +2162,7 @@
void LocationsBuilderX86_64::VisitFloatConstant(HFloatConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2172,7 +2172,7 @@
void LocationsBuilderX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2208,7 +2208,7 @@
void LocationsBuilderX86_64::VisitReturn(HReturn* ret) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(ret, LocationSummary::kNoCall);
switch (ret->InputAt(0)->GetType()) {
case DataType::Type::kReference:
case DataType::Type::kBool:
@@ -2474,7 +2474,7 @@
void LocationsBuilderX86_64::VisitNeg(HNeg* neg) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
switch (neg->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -2540,7 +2540,7 @@
void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(conversion, LocationSummary::kNoCall);
DataType::Type result_type = conversion->GetResultType();
DataType::Type input_type = conversion->GetInputType();
DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type))
@@ -3010,7 +3010,7 @@
void LocationsBuilderX86_64::VisitAdd(HAdd* add) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(add, LocationSummary::kNoCall);
switch (add->GetResultType()) {
case DataType::Type::kInt32: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -3134,7 +3134,7 @@
void LocationsBuilderX86_64::VisitSub(HSub* sub) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(sub, LocationSummary::kNoCall);
switch (sub->GetResultType()) {
case DataType::Type::kInt32: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -3225,7 +3225,7 @@
void LocationsBuilderX86_64::VisitMul(HMul* mul) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall);
switch (mul->GetResultType()) {
case DataType::Type::kInt32: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -3649,7 +3649,7 @@
}
} else {
SlowPathCode* slow_path =
- new (GetGraph()->GetArena()) DivRemMinusOneSlowPathX86_64(
+ new (GetGraph()->GetAllocator()) DivRemMinusOneSlowPathX86_64(
instruction, out.AsRegister(), type, is_div);
codegen_->AddSlowPath(slow_path);
@@ -3678,7 +3678,7 @@
void LocationsBuilderX86_64::VisitDiv(HDiv* div) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(div, LocationSummary::kNoCall);
switch (div->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64: {
@@ -3761,7 +3761,7 @@
void LocationsBuilderX86_64::VisitRem(HRem* rem) {
DataType::Type type = rem->GetResultType();
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(rem, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(rem, LocationSummary::kNoCall);
switch (type) {
case DataType::Type::kInt32:
@@ -3818,7 +3818,7 @@
void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
SlowPathCode* slow_path =
- new (GetGraph()->GetArena()) DivZeroCheckSlowPathX86_64(instruction);
+ new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathX86_64(instruction);
codegen_->AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -3869,7 +3869,7 @@
DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(op, LocationSummary::kNoCall);
switch (op->GetResultType()) {
case DataType::Type::kInt32:
@@ -3945,7 +3945,7 @@
void LocationsBuilderX86_64::VisitRor(HRor* ror) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(ror, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(ror, LocationSummary::kNoCall);
switch (ror->GetResultType()) {
case DataType::Type::kInt32:
@@ -4017,8 +4017,8 @@
}
void LocationsBuilderX86_64::VisitNewInstance(HNewInstance* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
if (instruction->IsStringAlloc()) {
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
@@ -4046,8 +4046,8 @@
}
void LocationsBuilderX86_64::VisitNewArray(HNewArray* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetOut(Location::RegisterLocation(RAX));
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -4066,7 +4066,7 @@
void LocationsBuilderX86_64::VisitParameterValue(HParameterValue* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
if (location.IsStackSlot()) {
location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
@@ -4083,7 +4083,7 @@
void LocationsBuilderX86_64::VisitCurrentMethod(HCurrentMethod* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}
@@ -4094,7 +4094,7 @@
void LocationsBuilderX86_64::VisitClassTableGet(HClassTableGet* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
@@ -4119,7 +4119,7 @@
void LocationsBuilderX86_64::VisitNot(HNot* not_) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(not_, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
@@ -4145,7 +4145,7 @@
void LocationsBuilderX86_64::VisitBooleanNot(HBooleanNot* bool_not) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(bool_not, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
@@ -4160,7 +4160,7 @@
void LocationsBuilderX86_64::VisitPhi(HPhi* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
locations->SetInAt(i, Location::Any());
}
@@ -4201,10 +4201,10 @@
bool object_field_get_with_read_barrier =
kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_field_get_with_read_barrier ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_field_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -4326,7 +4326,7 @@
DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
DataType::Type field_type = field_info.GetFieldType();
bool is_volatile = field_info.IsVolatile();
bool needs_write_barrier =
@@ -4602,7 +4602,7 @@
}
void CodeGeneratorX86_64::GenerateExplicitNullCheck(HNullCheck* instruction) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathX86_64(instruction);
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) NullCheckSlowPathX86_64(instruction);
AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -4629,10 +4629,10 @@
bool object_array_get_with_read_barrier =
kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_array_get_with_read_barrier ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_array_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -4775,7 +4775,7 @@
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
may_need_runtime_call_for_type_check ?
LocationSummary::kCallOnSlowPath :
@@ -4864,7 +4864,7 @@
Location temp_loc = locations->GetTemp(0);
CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathX86_64(instruction);
+ slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathX86_64(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
__ testl(register_value, register_value);
@@ -5002,7 +5002,7 @@
void LocationsBuilderX86_64::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
if (!instruction->IsEmittedAtUseSite()) {
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -5043,7 +5043,7 @@
LocationSummary* locations = instruction->GetLocations();
Location index_loc = locations->InAt(0);
Location length_loc = locations->InAt(1);
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathX86_64(instruction);
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) BoundsCheckSlowPathX86_64(instruction);
if (length_loc.IsConstant()) {
int32_t length = CodeGenerator::GetInt32ValueOf(length_loc.GetConstant());
@@ -5129,8 +5129,8 @@
}
void LocationsBuilderX86_64::VisitSuspendCheck(HSuspendCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
// In suspend check slow path, usually there are no caller-save registers at all.
// If SIMD instructions are present, however, we force spilling all live SIMD
// registers in full width (since the runtime only saves/restores lower part).
@@ -5157,7 +5157,7 @@
SuspendCheckSlowPathX86_64* slow_path =
down_cast<SuspendCheckSlowPathX86_64*>(instruction->GetSlowPath());
if (slow_path == nullptr) {
- slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathX86_64(instruction, successor);
+ slow_path = new (GetGraph()->GetAllocator()) SuspendCheckSlowPathX86_64(instruction, successor);
instruction->SetSlowPath(slow_path);
codegen_->AddSlowPath(slow_path);
if (successor != nullptr) {
@@ -5439,7 +5439,7 @@
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -5555,7 +5555,7 @@
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64(
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86_64(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
@@ -5572,7 +5572,7 @@
void LocationsBuilderX86_64::VisitClinitCheck(HClinitCheck* check) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
locations->SetInAt(0, Location::RequiresRegister());
if (check->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -5581,7 +5581,7 @@
void InstructionCodeGeneratorX86_64::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64(
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86_64(
check->GetLoadClass(), check, check->GetDexPc(), true);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path,
@@ -5608,7 +5608,7 @@
void LocationsBuilderX86_64::VisitLoadString(HLoadString* load) {
LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
if (load->GetLoadKind() == HLoadString::LoadKind::kRuntimeCall) {
locations->SetOut(Location::RegisterLocation(RAX));
} else {
@@ -5671,7 +5671,7 @@
Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
// /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86_64(load);
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadStringSlowPathX86_64(load);
codegen_->AddSlowPath(slow_path);
__ testl(out, out);
__ j(kEqual, slow_path->GetEntryLabel());
@@ -5707,7 +5707,7 @@
void LocationsBuilderX86_64::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -5716,7 +5716,7 @@
}
void LocationsBuilderX86_64::VisitClearException(HClearException* clear) {
- new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall);
}
void InstructionCodeGeneratorX86_64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
@@ -5724,8 +5724,8 @@
}
void LocationsBuilderX86_64::VisitThrow(HThrow* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}
@@ -5775,7 +5775,8 @@
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -5960,8 +5961,8 @@
__ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex()));
}
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86_64(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -5992,8 +5993,8 @@
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86_64(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
@@ -6041,7 +6042,8 @@
LocationSummary::CallKind call_kind = is_fatal_slow_path
? LocationSummary::kNoCall
: LocationSummary::kCallOnSlowPath;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
if (type_check_kind == TypeCheckKind::kInterfaceCheck) {
// Require a register for the interface check since there is a loop that compares the class to
@@ -6086,8 +6088,8 @@
bool is_type_check_slow_path_fatal =
IsTypeCheckSlowPathFatal(type_check_kind, instruction->CanThrowIntoCatchBlock());
SlowPathCode* type_check_slow_path =
- new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction,
- is_type_check_slow_path_fatal);
+ new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86_64(instruction,
+ is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
@@ -6285,8 +6287,8 @@
}
void LocationsBuilderX86_64::VisitMonitorOperation(HMonitorOperation* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}
@@ -6308,7 +6310,7 @@
void LocationsBuilderX86_64::HandleBitwiseOperation(HBinaryOperation* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
DCHECK(instruction->GetResultType() == DataType::Type::kInt32
|| instruction->GetResultType() == DataType::Type::kInt64);
locations->SetInAt(0, Location::RequiresRegister());
@@ -6512,7 +6514,7 @@
"have different sizes.");
// Slow path marking the GC root `root`.
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86_64(
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86_64(
instruction, root, /* unpoison_ref_before_marking */ false);
codegen_->AddSlowPath(slow_path);
@@ -6644,10 +6646,10 @@
if (always_update_field) {
DCHECK(temp1 != nullptr);
DCHECK(temp2 != nullptr);
- slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkAndUpdateFieldSlowPathX86_64(
+ slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86_64(
instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp1, *temp2);
} else {
- slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86_64(
+ slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86_64(
instruction, ref, /* unpoison_ref_before_marking */ true);
}
AddSlowPath(slow_path);
@@ -6680,7 +6682,7 @@
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCode* slow_path = new (GetGraph()->GetArena())
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator())
ReadBarrierForHeapReferenceSlowPathX86_64(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -6716,7 +6718,7 @@
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCode* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathX86_64(instruction, out, root);
+ new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathX86_64(instruction, out, root);
AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
@@ -6736,7 +6738,7 @@
// Simple implementation of packed switch - generate cascaded compare/jumps.
void LocationsBuilderX86_64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
@@ -7024,22 +7026,22 @@
}
Address CodeGeneratorX86_64::LiteralDoubleAddress(double v) {
- AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddDouble(v));
+ AssemblerFixup* fixup = new (GetGraph()->GetAllocator()) RIPFixup(*this, __ AddDouble(v));
return Address::RIP(fixup);
}
Address CodeGeneratorX86_64::LiteralFloatAddress(float v) {
- AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddFloat(v));
+ AssemblerFixup* fixup = new (GetGraph()->GetAllocator()) RIPFixup(*this, __ AddFloat(v));
return Address::RIP(fixup);
}
Address CodeGeneratorX86_64::LiteralInt32Address(int32_t v) {
- AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddInt32(v));
+ AssemblerFixup* fixup = new (GetGraph()->GetAllocator()) RIPFixup(*this, __ AddInt32(v));
return Address::RIP(fixup);
}
Address CodeGeneratorX86_64::LiteralInt64Address(int64_t v) {
- AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddInt64(v));
+ AssemblerFixup* fixup = new (GetGraph()->GetAllocator()) RIPFixup(*this, __ AddInt64(v));
return Address::RIP(fixup);
}
@@ -7058,7 +7060,7 @@
}
// Let the parallel move resolver take care of all of this.
- HParallelMove parallel_move(GetGraph()->GetArena());
+ HParallelMove parallel_move(GetGraph()->GetAllocator());
parallel_move.AddMove(return_loc, trg, type, nullptr);
GetMoveResolver()->EmitNativeCode(&parallel_move);
}
@@ -7066,7 +7068,7 @@
Address CodeGeneratorX86_64::LiteralCaseTable(HPackedSwitch* switch_instr) {
// Create a fixup to be used to create and address the jump table.
JumpTableRIPFixup* table_fixup =
- new (GetGraph()->GetArena()) JumpTableRIPFixup(*this, switch_instr);
+ new (GetGraph()->GetAllocator()) JumpTableRIPFixup(*this, switch_instr);
// We have to populate the jump tables.
fixups_to_jump_tables_.push_back(table_fixup);
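
The x86_64 hunks above are mechanical: every placement new that drew from GetGraph()->GetArena() now draws from GetGraph()->GetAllocator(). The underlying idiom is plain C++ placement new over a bump allocator. Below is a minimal self-contained sketch of that idiom; BumpArena and Node are hypothetical stand-ins for ART's ArenaAllocator and its arena-allocated IR objects, not the real classes.

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for ART's ArenaAllocator: memory is handed out from
// owned blocks and released all at once when the arena is destroyed.
class BumpArena {
 public:
  void* Alloc(std::size_t bytes) {
    blocks_.emplace_back(bytes);    // operator new keeps this block max-aligned
    return blocks_.back().data();
  }
 private:
  std::vector<std::vector<std::uint8_t>> blocks_;
};

// Placement-new overload so that `new (arena) T(...)` draws from the arena,
// mirroring `new (GetGraph()->GetAllocator()) LocationSummary(...)` above.
inline void* operator new(std::size_t bytes, BumpArena& arena) {
  return arena.Alloc(bytes);
}

struct Node {  // stand-in for an arena-allocated IR object
  explicit Node(int id) : id_(id) {}
  int id_;
};

int main() {
  BumpArena arena;
  Node* node = new (arena) Node(42);  // never delete'd; the arena owns the storage
  return node->id_ == 42 ? 0 : 1;
}
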
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
index b558eb1..d8ebac9 100644
--- a/compiler/optimizing/code_sinking.cc
+++ b/compiler/optimizing/code_sinking.cc
@@ -16,6 +16,10 @@
#include "code_sinking.h"
+#include "base/arena_bit_vector.h"
+#include "base/bit_vector-inl.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "common_dominator.h"
#include "nodes.h"
@@ -115,7 +119,7 @@
static void AddInstruction(HInstruction* instruction,
const ArenaBitVector& processed_instructions,
const ArenaBitVector& discard_blocks,
- ArenaVector<HInstruction*>* worklist) {
+ ScopedArenaVector<HInstruction*>* worklist) {
// Add to the work list if the instruction is not in the list of blocks
// to discard, hasn't already been processed, and is of interest.
if (!discard_blocks.IsBitSet(instruction->GetBlock()->GetBlockId()) &&
@@ -128,7 +132,7 @@
static void AddInputs(HInstruction* instruction,
const ArenaBitVector& processed_instructions,
const ArenaBitVector& discard_blocks,
- ArenaVector<HInstruction*>* worklist) {
+ ScopedArenaVector<HInstruction*>* worklist) {
for (HInstruction* input : instruction->GetInputs()) {
AddInstruction(input, processed_instructions, discard_blocks, worklist);
}
@@ -137,7 +141,7 @@
static void AddInputs(HBasicBlock* block,
const ArenaBitVector& processed_instructions,
const ArenaBitVector& discard_blocks,
- ArenaVector<HInstruction*>* worklist) {
+ ScopedArenaVector<HInstruction*>* worklist) {
for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
AddInputs(it.Current(), processed_instructions, discard_blocks, worklist);
}
@@ -242,17 +246,19 @@
void CodeSinking::SinkCodeToUncommonBranch(HBasicBlock* end_block) {
- // Local allocator to discard data structures created below at the end of
- // this optimization.
- ArenaAllocator allocator(graph_->GetArena()->GetArenaPool());
+ // Local allocator to discard data structures created below at the end of this optimization.
+ ScopedArenaAllocator allocator(graph_->GetArenaStack());
size_t number_of_instructions = graph_->GetCurrentInstructionId();
- ArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc));
+ ScopedArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc));
ArenaBitVector processed_instructions(&allocator, number_of_instructions, /* expandable */ false);
+ processed_instructions.ClearAllBits();
ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable */ false);
+ post_dominated.ClearAllBits();
ArenaBitVector instructions_that_can_move(
&allocator, number_of_instructions, /* expandable */ false);
- ArenaVector<HInstruction*> move_in_order(allocator.Adapter(kArenaAllocMisc));
+ instructions_that_can_move.ClearAllBits();
+ ScopedArenaVector<HInstruction*> move_in_order(allocator.Adapter(kArenaAllocMisc));
// Step (1): Visit post order to get a subset of blocks post dominated by `end_block`.
// TODO(ngeoffray): Getting the full set of post-dominated should be done by
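
Two things change in SinkCodeToUncommonBranch: the phase-local data structures move onto a ScopedArenaAllocator backed by the graph's arena stack, and each ArenaBitVector now gets an explicit ClearAllBits(). A likely reason for the latter (an assumption, not stated in the diff) is that the scoped arena, unlike the old zero-initializing ArenaAllocator, does not hand back cleared memory. A self-contained sketch of the stack-scoped idiom, with hypothetical names:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical stand-ins for ART's ArenaStack / ScopedArenaAllocator pair.
// Allocation is bump-pointer on a shared stack; destroying the scope pops
// everything the phase allocated. Alignment handling is omitted for brevity.
struct ArenaStack {
  std::vector<std::uint8_t> storage = std::vector<std::uint8_t>(1 << 20);
  std::size_t top = 0;
};

class ScopedArena {
 public:
  explicit ScopedArena(ArenaStack* stack) : stack_(stack), mark_(stack->top) {}
  ~ScopedArena() { stack_->top = mark_; }  // release the whole phase at once
  void* Alloc(std::size_t bytes) {
    assert(stack_->top + bytes <= stack_->storage.size());
    void* result = stack_->storage.data() + stack_->top;
    stack_->top += bytes;
    return result;  // note: bytes are NOT zeroed, unlike a fresh arena
  }
 private:
  ArenaStack* const stack_;
  const std::size_t mark_;
};

int main() {
  ArenaStack stack;
  {
    ScopedArena phase(&stack);  // cf. ScopedArenaAllocator(graph_->GetArenaStack())
    int* bits = static_cast<int*>(phase.Alloc(64 * sizeof(int)));
    for (int i = 0; i < 64; ++i) bits[i] = 0;  // the analogue of ClearAllBits()
  }  // phase-local memory reclaimed here
  return stack.top == 0 ? 0 : 1;
}
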
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 896fcfa..e35c7c7 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -72,34 +72,37 @@
return v;
}
-static void TestCode(const uint16_t* data,
- bool has_result = false,
- int32_t expected = 0) {
+class CodegenTest : public OptimizingUnitTest {
+ protected:
+ void TestCode(const uint16_t* data, bool has_result = false, int32_t expected = 0);
+ void TestCodeLong(const uint16_t* data, bool has_result, int64_t expected);
+ void TestComparison(IfCondition condition,
+ int64_t i,
+ int64_t j,
+ DataType::Type type,
+ const CodegenTargetConfig target_config);
+};
+
+void CodegenTest::TestCode(const uint16_t* data, bool has_result, int32_t expected) {
for (const CodegenTargetConfig& target_config : GetTargetConfigs()) {
- ArenaPool pool;
- ArenaAllocator arena(&pool);
- HGraph* graph = CreateCFG(&arena, data);
+ ResetPoolAndAllocator();
+ HGraph* graph = CreateCFG(data);
// Remove suspend checks; they cannot be executed in this context.
RemoveSuspendChecks(graph);
RunCode(target_config, graph, [](HGraph*) {}, has_result, expected);
}
}
-static void TestCodeLong(const uint16_t* data,
- bool has_result,
- int64_t expected) {
+void CodegenTest::TestCodeLong(const uint16_t* data, bool has_result, int64_t expected) {
for (const CodegenTargetConfig& target_config : GetTargetConfigs()) {
- ArenaPool pool;
- ArenaAllocator arena(&pool);
- HGraph* graph = CreateCFG(&arena, data, DataType::Type::kInt64);
+ ResetPoolAndAllocator();
+ HGraph* graph = CreateCFG(data, DataType::Type::kInt64);
// Remove suspend checks; they cannot be executed in this context.
RemoveSuspendChecks(graph);
RunCode(target_config, graph, [](HGraph*) {}, has_result, expected);
}
}
-class CodegenTest : public CommonCompilerTest {};
-
TEST_F(CodegenTest, ReturnVoid) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(Instruction::RETURN_VOID);
TestCode(data);
@@ -412,28 +415,25 @@
TEST_F(CodegenTest, NonMaterializedCondition) {
for (CodegenTargetConfig target_config : GetTargetConfigs()) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ HGraph* graph = CreateGraph();
- HGraph* graph = CreateGraph(&allocator);
-
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- entry->AddInstruction(new (&allocator) HGoto());
+ entry->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* first_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* first_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(first_block);
entry->AddSuccessor(first_block);
HIntConstant* constant0 = graph->GetIntConstant(0);
HIntConstant* constant1 = graph->GetIntConstant(1);
- HEqual* equal = new (&allocator) HEqual(constant0, constant0);
+ HEqual* equal = new (GetAllocator()) HEqual(constant0, constant0);
first_block->AddInstruction(equal);
- first_block->AddInstruction(new (&allocator) HIf(equal));
+ first_block->AddInstruction(new (GetAllocator()) HIf(equal));
- HBasicBlock* then_block = new (&allocator) HBasicBlock(graph);
- HBasicBlock* else_block = new (&allocator) HBasicBlock(graph);
- HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* then_block = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* else_block = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
graph->SetExitBlock(exit_block);
graph->AddBlock(then_block);
@@ -444,9 +444,9 @@
then_block->AddSuccessor(exit_block);
else_block->AddSuccessor(exit_block);
- exit_block->AddInstruction(new (&allocator) HExit());
- then_block->AddInstruction(new (&allocator) HReturn(constant0));
- else_block->AddInstruction(new (&allocator) HReturn(constant1));
+ exit_block->AddInstruction(new (GetAllocator()) HExit());
+ then_block->AddInstruction(new (GetAllocator()) HReturn(constant0));
+ else_block->AddInstruction(new (GetAllocator()) HReturn(constant1));
ASSERT_FALSE(equal->IsEmittedAtUseSite());
graph->BuildDominatorTree();
@@ -455,7 +455,7 @@
auto hook_before_codegen = [](HGraph* graph_in) {
HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
- HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
+ HParallelMove* move = new (graph_in->GetAllocator()) HParallelMove(graph_in->GetAllocator());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
@@ -475,19 +475,17 @@
int rhs[] = {2, 1, 2, -1, 0xabc};
for (size_t i = 0; i < arraysize(lhs); i++) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
+ HGraph* graph = CreateGraph();
- HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry_block);
graph->SetEntryBlock(entry_block);
- entry_block->AddInstruction(new (&allocator) HGoto());
- HBasicBlock* code_block = new (&allocator) HBasicBlock(graph);
+ entry_block->AddInstruction(new (GetAllocator()) HGoto());
+ HBasicBlock* code_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(code_block);
- HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(exit_block);
- exit_block->AddInstruction(new (&allocator) HExit());
+ exit_block->AddInstruction(new (GetAllocator()) HExit());
entry_block->AddSuccessor(code_block);
code_block->AddSuccessor(exit_block);
@@ -503,7 +501,8 @@
graph->BuildDominatorTree();
auto hook_before_codegen = [](HGraph* graph_in) {
HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
- HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
+ HParallelMove* move =
+ new (graph_in->GetAllocator()) HParallelMove(graph_in->GetAllocator());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
RunCode(target_config, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
@@ -523,24 +522,22 @@
for (size_t i = 0; i < arraysize(lhs); i++) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
+ HGraph* graph = CreateGraph();
- HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry_block);
graph->SetEntryBlock(entry_block);
- entry_block->AddInstruction(new (&allocator) HGoto());
+ entry_block->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* if_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* if_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(if_block);
- HBasicBlock* if_true_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* if_true_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(if_true_block);
- HBasicBlock* if_false_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* if_false_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(if_false_block);
- HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(exit_block);
- exit_block->AddInstruction(new (&allocator) HExit());
+ exit_block->AddInstruction(new (GetAllocator()) HExit());
graph->SetEntryBlock(entry_block);
entry_block->AddSuccessor(if_block);
@@ -571,7 +568,8 @@
graph->BuildDominatorTree();
auto hook_before_codegen = [](HGraph* graph_in) {
HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
- HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
+ HParallelMove* move =
+ new (graph_in->GetAllocator()) HParallelMove(graph_in->GetAllocator());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
RunCode(target_config, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
@@ -599,27 +597,25 @@
}
// Helper method.
-static void TestComparison(IfCondition condition,
- int64_t i,
- int64_t j,
- DataType::Type type,
- const CodegenTargetConfig target_config) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
+void CodegenTest::TestComparison(IfCondition condition,
+ int64_t i,
+ int64_t j,
+ DataType::Type type,
+ const CodegenTargetConfig target_config) {
+ HGraph* graph = CreateGraph();
- HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry_block);
graph->SetEntryBlock(entry_block);
- entry_block->AddInstruction(new (&allocator) HGoto());
+ entry_block->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
- HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(exit_block);
graph->SetExitBlock(exit_block);
- exit_block->AddInstruction(new (&allocator) HExit());
+ exit_block->AddInstruction(new (GetAllocator()) HExit());
entry_block->AddSuccessor(block);
block->AddSuccessor(exit_block);
@@ -641,48 +637,48 @@
const uint64_t y = j;
switch (condition) {
case kCondEQ:
- comparison = new (&allocator) HEqual(op1, op2);
+ comparison = new (GetAllocator()) HEqual(op1, op2);
expected_result = (i == j);
break;
case kCondNE:
- comparison = new (&allocator) HNotEqual(op1, op2);
+ comparison = new (GetAllocator()) HNotEqual(op1, op2);
expected_result = (i != j);
break;
case kCondLT:
- comparison = new (&allocator) HLessThan(op1, op2);
+ comparison = new (GetAllocator()) HLessThan(op1, op2);
expected_result = (i < j);
break;
case kCondLE:
- comparison = new (&allocator) HLessThanOrEqual(op1, op2);
+ comparison = new (GetAllocator()) HLessThanOrEqual(op1, op2);
expected_result = (i <= j);
break;
case kCondGT:
- comparison = new (&allocator) HGreaterThan(op1, op2);
+ comparison = new (GetAllocator()) HGreaterThan(op1, op2);
expected_result = (i > j);
break;
case kCondGE:
- comparison = new (&allocator) HGreaterThanOrEqual(op1, op2);
+ comparison = new (GetAllocator()) HGreaterThanOrEqual(op1, op2);
expected_result = (i >= j);
break;
case kCondB:
- comparison = new (&allocator) HBelow(op1, op2);
+ comparison = new (GetAllocator()) HBelow(op1, op2);
expected_result = (x < y);
break;
case kCondBE:
- comparison = new (&allocator) HBelowOrEqual(op1, op2);
+ comparison = new (GetAllocator()) HBelowOrEqual(op1, op2);
expected_result = (x <= y);
break;
case kCondA:
- comparison = new (&allocator) HAbove(op1, op2);
+ comparison = new (GetAllocator()) HAbove(op1, op2);
expected_result = (x > y);
break;
case kCondAE:
- comparison = new (&allocator) HAboveOrEqual(op1, op2);
+ comparison = new (GetAllocator()) HAboveOrEqual(op1, op2);
expected_result = (x >= y);
break;
}
block->AddInstruction(comparison);
- block->AddInstruction(new (&allocator) HReturn(comparison));
+ block->AddInstruction(new (GetAllocator()) HReturn(comparison));
graph->BuildDominatorTree();
RunCode(target_config, graph, [](HGraph*) {}, true, expected_result);
@@ -718,9 +714,7 @@
TEST_F(CodegenTest, ARMVIXLParallelMoveResolver) {
std::unique_ptr<const ArmInstructionSetFeatures> features(
ArmInstructionSetFeatures::FromCppDefines());
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
+ HGraph* graph = CreateGraph();
arm::CodeGeneratorARMVIXL codegen(graph, *features.get(), CompilerOptions());
codegen.Initialize();
@@ -729,7 +723,7 @@
// int mem2), which was faulty (before the fix). Previously, GPR and FP scratch registers were
// used as temps; however, the GPR scratch register is required for big stack offsets that don't
// fit the LDR encoding. The following code is a regression test for that situation.
- HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+ HParallelMove* move = new (graph->GetAllocator()) HParallelMove(graph->GetAllocator());
move->AddMove(Location::StackSlot(0), Location::StackSlot(8192), DataType::Type::kInt32, nullptr);
move->AddMove(Location::StackSlot(8192), Location::StackSlot(0), DataType::Type::kInt32, nullptr);
codegen.GetMoveResolver()->EmitNativeCode(move);
@@ -744,9 +738,7 @@
TEST_F(CodegenTest, ARM64ParallelMoveResolverB34760542) {
std::unique_ptr<const Arm64InstructionSetFeatures> features(
Arm64InstructionSetFeatures::FromCppDefines());
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
+ HGraph* graph = CreateGraph();
arm64::CodeGeneratorARM64 codegen(graph, *features.get(), CompilerOptions());
codegen.Initialize();
@@ -777,7 +769,7 @@
// The solution used so far is to use a floating-point temp register
// (D31) in step #2, so that IP1 is available for step #3.
- HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+ HParallelMove* move = new (graph->GetAllocator()) HParallelMove(graph->GetAllocator());
move->AddMove(Location::DoubleStackSlot(0),
Location::DoubleStackSlot(257),
DataType::Type::kFloat64,
@@ -796,16 +788,14 @@
TEST_F(CodegenTest, ARM64ParallelMoveResolverSIMD) {
std::unique_ptr<const Arm64InstructionSetFeatures> features(
Arm64InstructionSetFeatures::FromCppDefines());
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
+ HGraph* graph = CreateGraph();
arm64::CodeGeneratorARM64 codegen(graph, *features.get(), CompilerOptions());
codegen.Initialize();
graph->SetHasSIMD(true);
for (int i = 0; i < 2; i++) {
- HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+ HParallelMove* move = new (graph->GetAllocator()) HParallelMove(graph->GetAllocator());
move->AddMove(Location::SIMDStackSlot(0),
Location::SIMDStackSlot(257),
DataType::Type::kFloat64,
@@ -841,33 +831,31 @@
return;
}
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
+ HGraph* graph = CreateGraph();
- HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry_block);
graph->SetEntryBlock(entry_block);
- entry_block->AddInstruction(new (&allocator) HGoto());
+ entry_block->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
- HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(exit_block);
graph->SetExitBlock(exit_block);
- exit_block->AddInstruction(new (&allocator) HExit());
+ exit_block->AddInstruction(new (GetAllocator()) HExit());
entry_block->AddSuccessor(block);
block->AddSuccessor(exit_block);
// To simplify matters, don't create PC-relative HLoadClass or HLoadString.
// Instead, generate HMipsComputeBaseMethodAddress directly.
- HMipsComputeBaseMethodAddress* base = new (&allocator) HMipsComputeBaseMethodAddress();
+ HMipsComputeBaseMethodAddress* base = new (GetAllocator()) HMipsComputeBaseMethodAddress();
block->AddInstruction(base);
// HMipsComputeBaseMethodAddress is defined as int, so just make the
// compiled method return it.
- block->AddInstruction(new (&allocator) HReturn(base));
+ block->AddInstruction(new (GetAllocator()) HReturn(base));
graph->BuildDominatorTree();
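
Throughout codegen_test.cc, per-test ArenaPool/ArenaAllocator locals disappear in favor of the OptimizingUnitTest fixture, which supplies CreateGraph(), CreateCFG(), GetAllocator(), and ResetPoolAndAllocator(). A hypothetical skeleton of that fixture idea, assuming only GoogleTest; Pool and Allocator here are placeholders, not ART's types:

#include <memory>
#include <gtest/gtest.h>

struct Pool {};
struct Allocator { explicit Allocator(Pool*) {} };

// Sketch: the fixture owns the pool and allocator, so each TEST_F gets a
// fresh allocator without declaring locals, and loop bodies can call
// ResetPoolAndAllocator() to start over, as TestCode() does above.
class OptimizingUnitTestSketch : public ::testing::Test {
 protected:
  Allocator* GetAllocator() { return allocator_.get(); }
  void ResetPoolAndAllocator() { allocator_ = std::make_unique<Allocator>(&pool_); }
  void SetUp() override { ResetPoolAndAllocator(); }

 private:
  Pool pool_;
  std::unique_ptr<Allocator> allocator_;
};

TEST_F(OptimizingUnitTestSketch, AllocatorIsAvailable) {
  EXPECT_NE(GetAllocator(), nullptr);
}
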
diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h
index aa4f5da..bcbcc12 100644
--- a/compiler/optimizing/codegen_test_utils.h
+++ b/compiler/optimizing/codegen_test_utils.h
@@ -295,10 +295,15 @@
const std::function<void(HGraph*)>& hook_before_codegen,
bool has_result,
Expected expected) {
- SsaLivenessAnalysis liveness(graph, codegen);
- PrepareForRegisterAllocation(graph).Run();
- liveness.Analyze();
- RegisterAllocator::Create(graph->GetArena(), codegen, liveness)->AllocateRegisters();
+ {
+ ScopedArenaAllocator local_allocator(graph->GetArenaStack());
+ SsaLivenessAnalysis liveness(graph, codegen, &local_allocator);
+ PrepareForRegisterAllocation(graph).Run();
+ liveness.Analyze();
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(&local_allocator, codegen, liveness);
+ register_allocator->AllocateRegisters();
+ }
hook_before_codegen(graph);
InternalCodeAllocator allocator;
codegen->Compile(&allocator);
@@ -331,7 +336,7 @@
CodeGenerator* create_codegen_arm_vixl32(HGraph* graph, const CompilerOptions& compiler_options) {
std::unique_ptr<const ArmInstructionSetFeatures> features_arm(
ArmInstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena())
+ return new (graph->GetAllocator())
TestCodeGeneratorARMVIXL(graph, *features_arm.get(), compiler_options);
}
#endif
@@ -340,7 +345,7 @@
CodeGenerator* create_codegen_arm64(HGraph* graph, const CompilerOptions& compiler_options) {
std::unique_ptr<const Arm64InstructionSetFeatures> features_arm64(
Arm64InstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena())
+ return new (graph->GetAllocator())
TestCodeGeneratorARM64(graph, *features_arm64.get(), compiler_options);
}
#endif
@@ -349,7 +354,8 @@
CodeGenerator* create_codegen_x86(HGraph* graph, const CompilerOptions& compiler_options) {
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena()) TestCodeGeneratorX86(graph, *features_x86.get(), compiler_options);
+ return new (graph->GetAllocator()) TestCodeGeneratorX86(
+ graph, *features_x86.get(), compiler_options);
}
#endif
@@ -357,7 +363,7 @@
CodeGenerator* create_codegen_x86_64(HGraph* graph, const CompilerOptions& compiler_options) {
std::unique_ptr<const X86_64InstructionSetFeatures> features_x86_64(
X86_64InstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena())
+ return new (graph->GetAllocator())
x86_64::CodeGeneratorX86_64(graph, *features_x86_64.get(), compiler_options);
}
#endif
@@ -366,7 +372,7 @@
CodeGenerator* create_codegen_mips(HGraph* graph, const CompilerOptions& compiler_options) {
std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
MipsInstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena())
+ return new (graph->GetAllocator())
mips::CodeGeneratorMIPS(graph, *features_mips.get(), compiler_options);
}
#endif
@@ -375,7 +381,7 @@
CodeGenerator* create_codegen_mips64(HGraph* graph, const CompilerOptions& compiler_options) {
std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64(
Mips64InstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena())
+ return new (graph->GetAllocator())
mips64::CodeGeneratorMIPS64(graph, *features_mips64.get(), compiler_options);
}
#endif
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index c85a2e3..e1980e0 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -32,11 +32,9 @@
/**
* Fixture class for the constant folding and dce tests.
*/
-class ConstantFoldingTest : public CommonCompilerTest {
+class ConstantFoldingTest : public OptimizingUnitTest {
public:
- ConstantFoldingTest() : pool_(), allocator_(&pool_) {
- graph_ = CreateGraph(&allocator_);
- }
+ ConstantFoldingTest() : graph_(nullptr) { }
void TestCode(const uint16_t* data,
const std::string& expected_before,
@@ -44,7 +42,7 @@
const std::string& expected_after_dce,
const std::function<void(HGraph*)>& check_after_cf,
DataType::Type return_type = DataType::Type::kInt32) {
- graph_ = CreateCFG(&allocator_, data, return_type);
+ graph_ = CreateCFG(data, return_type);
TestCodeOnReadyGraph(expected_before,
expected_after_cf,
expected_after_dce,
@@ -88,8 +86,6 @@
EXPECT_EQ(expected_after_dce, actual_after_dce);
}
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
};
@@ -742,46 +738,46 @@
* in the bytecode, we need to set up the graph explicitly.
*/
TEST_F(ConstantFoldingTest, UnsignedComparisonsWithZero) {
- graph_ = CreateGraph(&allocator_);
- HBasicBlock* entry_block = new (&allocator_) HBasicBlock(graph_);
+ graph_ = CreateGraph();
+ HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry_block);
graph_->SetEntryBlock(entry_block);
- HBasicBlock* block = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block);
- HBasicBlock* exit_block = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(exit_block);
graph_->SetExitBlock(exit_block);
entry_block->AddSuccessor(block);
block->AddSuccessor(exit_block);
// Make various unsigned comparisons with zero against a parameter.
- HInstruction* parameter = new (&allocator_) HParameterValue(
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32, true);
entry_block->AddInstruction(parameter);
- entry_block->AddInstruction(new (&allocator_) HGoto());
+ entry_block->AddInstruction(new (GetAllocator()) HGoto());
HInstruction* zero = graph_->GetIntConstant(0);
HInstruction* last;
- block->AddInstruction(last = new (&allocator_) HAbove(zero, parameter));
- block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
- block->AddInstruction(last = new (&allocator_) HAbove(parameter, zero));
- block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
- block->AddInstruction(last = new (&allocator_) HAboveOrEqual(zero, parameter));
- block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
- block->AddInstruction(last = new (&allocator_) HAboveOrEqual(parameter, zero));
- block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
- block->AddInstruction(last = new (&allocator_) HBelow(zero, parameter));
- block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
- block->AddInstruction(last = new (&allocator_) HBelow(parameter, zero));
- block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
- block->AddInstruction(last = new (&allocator_) HBelowOrEqual(zero, parameter));
- block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
- block->AddInstruction(last = new (&allocator_) HBelowOrEqual(parameter, zero));
- block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
- block->AddInstruction(new (&allocator_) HReturn(zero));
+ block->AddInstruction(last = new (GetAllocator()) HAbove(zero, parameter));
+ block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0));
+ block->AddInstruction(last = new (GetAllocator()) HAbove(parameter, zero));
+ block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0));
+ block->AddInstruction(last = new (GetAllocator()) HAboveOrEqual(zero, parameter));
+ block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0));
+ block->AddInstruction(last = new (GetAllocator()) HAboveOrEqual(parameter, zero));
+ block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0));
+ block->AddInstruction(last = new (GetAllocator()) HBelow(zero, parameter));
+ block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0));
+ block->AddInstruction(last = new (GetAllocator()) HBelow(parameter, zero));
+ block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0));
+ block->AddInstruction(last = new (GetAllocator()) HBelowOrEqual(zero, parameter));
+ block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0));
+ block->AddInstruction(last = new (GetAllocator()) HBelowOrEqual(parameter, zero));
+ block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0));
+ block->AddInstruction(new (GetAllocator()) HReturn(zero));
- exit_block->AddInstruction(new (&allocator_) HExit());
+ exit_block->AddInstruction(new (GetAllocator()) HExit());
graph_->BuildDominatorTree();
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.cc b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
index ff7ce60..4a66cd2 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.cc
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
@@ -17,6 +17,8 @@
#include "constructor_fence_redundancy_elimination.h"
#include "base/arena_allocator.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
namespace art {
@@ -27,7 +29,7 @@
public:
CFREVisitor(HGraph* graph, OptimizingCompilerStats* stats)
: HGraphVisitor(graph),
- scoped_allocator_(graph->GetArena()->GetArenaPool()),
+ scoped_allocator_(graph->GetArenaStack()),
candidate_fences_(scoped_allocator_.Adapter(kArenaAllocCFRE)),
candidate_fence_targets_(scoped_allocator_.Adapter(kArenaAllocCFRE)),
stats_(stats) {}
@@ -227,9 +229,8 @@
MaybeRecordStat(stats_, MethodCompilationStat::kConstructorFenceRemovedCFRE);
}
- // Phase-local heap memory allocator for CFRE optimizer. Storage obtained
- // through this allocator is immediately released when the CFRE optimizer is done.
- ArenaAllocator scoped_allocator_;
+ // Phase-local heap memory allocator for CFRE optimizer.
+ ScopedArenaAllocator scoped_allocator_;
// Set of constructor fences that we've seen in the current block.
// Each constructor fence acts as a guard for one or more `targets`.
@@ -237,11 +238,11 @@
//
// Fences are in succession order (e.g. fence[i] succeeds fence[i-1]
// within the same basic block).
- ArenaVector<HConstructorFence*> candidate_fences_;
+ ScopedArenaVector<HConstructorFence*> candidate_fences_;
// Stores a set of the fence targets, to allow faster lookup of whether
// a detected publish is a target of one of the candidate fences.
- ArenaHashSet<HInstruction*> candidate_fence_targets_;
+ ScopedArenaHashSet<HInstruction*> candidate_fence_targets_;
// Used to record stats about the optimization.
OptimizingCompilerStats* const stats_;
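
The CFRE visitor's containers become ScopedArenaVector/ScopedArenaHashSet: ordinary containers parameterized with an allocator adapter over the scoped arena, which is what scoped_allocator_.Adapter(kArenaAllocCFRE) produces (the tag feeds ART's allocation statistics). A minimal sketch of such an adapter, with hypothetical names and the real arena replaced by a leaky stand-in:

#include <cstddef>
#include <cstdint>
#include <vector>

// Leaky stand-in arena; ART's real one reclaims everything on scope exit.
class Arena {
 public:
  void* Alloc(std::size_t bytes) {
    blocks_.emplace_back(bytes);
    return blocks_.back().data();
  }
 private:
  std::vector<std::vector<std::uint8_t>> blocks_;
};

// STL-style allocator forwarding to the arena, the general shape of what
// Adapter() returns. The real adapter also records an allocation-kind tag
// (e.g. kArenaAllocCFRE) for memory statistics; that part is omitted.
template <typename T>
class ArenaAdapter {
 public:
  using value_type = T;
  explicit ArenaAdapter(Arena* arena) : arena_(arena) {}
  template <typename U>
  ArenaAdapter(const ArenaAdapter<U>& other) : arena_(other.arena_) {}
  T* allocate(std::size_t n) {
    return static_cast<T*>(arena_->Alloc(n * sizeof(T)));
  }
  void deallocate(T*, std::size_t) {}  // the arena frees in bulk, not per object
  Arena* arena_;
};

template <typename T, typename U>
bool operator==(const ArenaAdapter<T>& a, const ArenaAdapter<U>& b) {
  return a.arena_ == b.arena_;
}
template <typename T, typename U>
bool operator!=(const ArenaAdapter<T>& a, const ArenaAdapter<U>& b) {
  return !(a == b);
}

template <typename T>
using ScopedVector = std::vector<T, ArenaAdapter<T>>;  // cf. ScopedArenaVector

int main() {
  Arena arena;
  ScopedVector<int> worklist{ArenaAdapter<int>{&arena}};
  worklist.push_back(1);
  worklist.push_back(2);
  return worklist.size() == 2 ? 0 : 1;
}
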
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index 9b094e9..5117e07 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -24,7 +24,7 @@
namespace art {
static void MarkReachableBlocks(HGraph* graph, ArenaBitVector* visited) {
- ArenaVector<HBasicBlock*> worklist(graph->GetArena()->Adapter(kArenaAllocDCE));
+ ArenaVector<HBasicBlock*> worklist(graph->GetAllocator()->Adapter(kArenaAllocDCE));
constexpr size_t kDefaultWorklistSize = 8;
worklist.reserve(kDefaultWorklistSize);
visited->SetBit(graph->GetEntryBlock()->GetBlockId());
@@ -306,7 +306,7 @@
bool HDeadCodeElimination::RemoveDeadBlocks() {
// Classify blocks as reachable/unreachable.
- ArenaAllocator* allocator = graph_->GetArena();
+ ArenaAllocator* allocator = graph_->GetAllocator();
ArenaBitVector live_blocks(allocator, graph_->GetBlocks().size(), false, kArenaAllocDCE);
MarkReachableBlocks(graph_, &live_blocks);
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index 96fa540..929572e 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -27,14 +27,17 @@
namespace art {
-class DeadCodeEliminationTest : public CommonCompilerTest {};
+class DeadCodeEliminationTest : public OptimizingUnitTest {
+ protected:
+ void TestCode(const uint16_t* data,
+ const std::string& expected_before,
+ const std::string& expected_after);
+};
-static void TestCode(const uint16_t* data,
- const std::string& expected_before,
- const std::string& expected_after) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+void DeadCodeEliminationTest::TestCode(const uint16_t* data,
+ const std::string& expected_before,
+ const std::string& expected_after) {
+ HGraph* graph = CreateCFG(data);
ASSERT_NE(graph, nullptr);
StringPrettyPrinter printer_before(graph);
diff --git a/compiler/optimizing/dominator_test.cc b/compiler/optimizing/dominator_test.cc
index 50c677a..6bf3a59 100644
--- a/compiler/optimizing/dominator_test.cc
+++ b/compiler/optimizing/dominator_test.cc
@@ -24,12 +24,13 @@
namespace art {
-class OptimizerTest : public CommonCompilerTest {};
+class OptimizerTest : public OptimizingUnitTest {
+ protected:
+ void TestCode(const uint16_t* data, const uint32_t* blocks, size_t blocks_length);
+};
-static void TestCode(const uint16_t* data, const uint32_t* blocks, size_t blocks_length) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+void OptimizerTest::TestCode(const uint16_t* data, const uint32_t* blocks, size_t blocks_length) {
+ HGraph* graph = CreateCFG(data);
ASSERT_EQ(graph->GetBlocks().size(), blocks_length);
for (size_t i = 0, e = blocks_length; i < e; ++i) {
if (blocks[i] == kInvalidBlockId) {
diff --git a/compiler/optimizing/emit_swap_mips_test.cc b/compiler/optimizing/emit_swap_mips_test.cc
index 0e9c81d..36e932c 100644
--- a/compiler/optimizing/emit_swap_mips_test.cc
+++ b/compiler/optimizing/emit_swap_mips_test.cc
@@ -25,16 +25,15 @@
namespace art {
-class EmitSwapMipsTest : public ::testing::Test {
+class EmitSwapMipsTest : public OptimizingUnitTest {
public:
void SetUp() OVERRIDE {
- allocator_.reset(new ArenaAllocator(&pool_));
- graph_ = CreateGraph(allocator_.get());
+ graph_ = CreateGraph();
isa_features_ = MipsInstructionSetFeatures::FromCppDefines();
- codegen_ = new (graph_->GetArena()) mips::CodeGeneratorMIPS(graph_,
- *isa_features_.get(),
- CompilerOptions());
- moves_ = new (allocator_.get()) HParallelMove(allocator_.get());
+ codegen_ = new (graph_->GetAllocator()) mips::CodeGeneratorMIPS(graph_,
+ *isa_features_.get(),
+ CompilerOptions());
+ moves_ = new (GetAllocator()) HParallelMove(GetAllocator());
test_helper_.reset(
new AssemblerTestInfrastructure(GetArchitectureString(),
GetAssemblerCmdName(),
@@ -47,8 +46,9 @@
}
void TearDown() OVERRIDE {
- allocator_.reset();
test_helper_.reset();
+ isa_features_.reset();
+ ResetPoolAndAllocator();
}
// Get the typically used name for this architecture.
@@ -104,12 +104,10 @@
}
protected:
- ArenaPool pool_;
HGraph* graph_;
HParallelMove* moves_;
mips::CodeGeneratorMIPS* codegen_;
mips::MipsAssembler* assembler_;
- std::unique_ptr<ArenaAllocator> allocator_;
std::unique_ptr<AssemblerTestInfrastructure> test_helper_;
std::unique_ptr<const MipsInstructionSetFeatures> isa_features_;
};
diff --git a/compiler/optimizing/find_loops_test.cc b/compiler/optimizing/find_loops_test.cc
index bbd28f5..c917528 100644
--- a/compiler/optimizing/find_loops_test.cc
+++ b/compiler/optimizing/find_loops_test.cc
@@ -27,7 +27,7 @@
namespace art {
-class FindLoopsTest : public CommonCompilerTest {};
+class FindLoopsTest : public OptimizingUnitTest {};
TEST_F(FindLoopsTest, CFG1) {
// Constant is not used.
@@ -35,9 +35,7 @@
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN_VOID);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
for (HBasicBlock* block : graph->GetBlocks()) {
ASSERT_EQ(block->GetLoopInformation(), nullptr);
}
@@ -48,9 +46,7 @@
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
for (HBasicBlock* block : graph->GetBlocks()) {
ASSERT_EQ(block->GetLoopInformation(), nullptr);
}
@@ -64,9 +60,7 @@
Instruction::GOTO | 0x100,
Instruction::RETURN);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
for (HBasicBlock* block : graph->GetBlocks()) {
ASSERT_EQ(block->GetLoopInformation(), nullptr);
}
@@ -81,9 +75,7 @@
Instruction::CONST_4 | 5 << 12 | 0,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
for (HBasicBlock* block : graph->GetBlocks()) {
ASSERT_EQ(block->GetLoopInformation(), nullptr);
}
@@ -96,9 +88,7 @@
Instruction::CONST_4 | 4 << 12 | 0,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
for (HBasicBlock* block : graph->GetBlocks()) {
ASSERT_EQ(block->GetLoopInformation(), nullptr);
}
@@ -142,9 +132,7 @@
Instruction::GOTO | 0xFE00,
Instruction::RETURN_VOID);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // pre header
@@ -170,9 +158,7 @@
Instruction::GOTO | 0xFD00,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // goto block
@@ -195,9 +181,7 @@
Instruction::GOTO | 0xFE00,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // goto block
@@ -221,9 +205,7 @@
Instruction::GOTO | 0xFB00,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // pre header
@@ -247,9 +229,7 @@
Instruction::GOTO | 0xFB00,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // pre header
@@ -272,9 +252,7 @@
Instruction::GOTO | 0xFB00,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // pre header of outer loop
@@ -303,9 +281,7 @@
Instruction::GOTO | 0xFE00, // second loop
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // pre header of first loop
@@ -333,9 +309,7 @@
Instruction::GOTO | 0xFD00,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
ASSERT_TRUE(graph->GetBlocks()[3]->IsLoopHeader());
HLoopInformation* info = graph->GetBlocks()[3]->GetLoopInformation();
ASSERT_EQ(1u, info->NumberOfBackEdges());
@@ -349,9 +323,7 @@
Instruction::IF_EQ, 0xFFFF,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // pre header of first loop
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index 3060c80..6af7b42 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -28,14 +28,14 @@
public:
explicit GraphChecker(HGraph* graph, const char* dump_prefix = "art::GraphChecker: ")
: HGraphDelegateVisitor(graph),
- errors_(graph->GetArena()->Adapter(kArenaAllocGraphChecker)),
+ errors_(graph->GetAllocator()->Adapter(kArenaAllocGraphChecker)),
dump_prefix_(dump_prefix),
- seen_ids_(graph->GetArena(),
+ seen_ids_(graph->GetAllocator(),
graph->GetCurrentInstructionId(),
false,
kArenaAllocGraphChecker),
- blocks_storage_(graph->GetArena()->Adapter(kArenaAllocGraphChecker)),
- visited_storage_(graph->GetArena(), 0u, true, kArenaAllocGraphChecker) {}
+ blocks_storage_(graph->GetAllocator()->Adapter(kArenaAllocGraphChecker)),
+ visited_storage_(graph->GetAllocator(), 0u, true, kArenaAllocGraphChecker) {}
// Check the whole graph (in reverse post-order).
void Run() {
diff --git a/compiler/optimizing/graph_checker_test.cc b/compiler/optimizing/graph_checker_test.cc
index 2b82319..9ca3e49 100644
--- a/compiler/optimizing/graph_checker_test.cc
+++ b/compiler/optimizing/graph_checker_test.cc
@@ -19,6 +19,12 @@
namespace art {
+class GraphCheckerTest : public OptimizingUnitTest {
+ protected:
+ HGraph* CreateSimpleCFG();
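+ // Builds a CFG from |data| and expects GraphChecker to find it valid.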
+ void TestCode(const uint16_t* data);
+};
+
/**
* Create a simple control-flow graph composed of two blocks:
*
@@ -27,14 +33,14 @@
* BasicBlock 1, pred: 0
* 1: Exit
*/
-HGraph* CreateSimpleCFG(ArenaAllocator* allocator) {
- HGraph* graph = CreateGraph(allocator);
- HBasicBlock* entry_block = new (allocator) HBasicBlock(graph);
- entry_block->AddInstruction(new (allocator) HReturnVoid());
+HGraph* GraphCheckerTest::CreateSimpleCFG() {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph);
+ entry_block->AddInstruction(new (GetAllocator()) HReturnVoid());
graph->AddBlock(entry_block);
graph->SetEntryBlock(entry_block);
- HBasicBlock* exit_block = new (allocator) HBasicBlock(graph);
- exit_block->AddInstruction(new (allocator) HExit());
+ HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
+ exit_block->AddInstruction(new (GetAllocator()) HExit());
graph->AddBlock(exit_block);
graph->SetExitBlock(exit_block);
entry_block->AddSuccessor(exit_block);
@@ -42,10 +48,8 @@
return graph;
}
-static void TestCode(const uint16_t* data) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+void GraphCheckerTest::TestCode(const uint16_t* data) {
+ HGraph* graph = CreateCFG(data);
ASSERT_NE(graph, nullptr);
GraphChecker graph_checker(graph);
@@ -53,8 +57,6 @@
ASSERT_TRUE(graph_checker.IsValid());
}
-class GraphCheckerTest : public CommonCompilerTest {};
-
TEST_F(GraphCheckerTest, ReturnVoid) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID);
@@ -93,10 +95,7 @@
// Test case with an invalid graph containing inconsistent
// predecessor/successor arcs in CFG.
TEST_F(GraphCheckerTest, InconsistentPredecessorsAndSuccessors) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateSimpleCFG(&allocator);
+ HGraph* graph = CreateSimpleCFG();
GraphChecker graph_checker(graph);
graph_checker.Run();
ASSERT_TRUE(graph_checker.IsValid());
@@ -111,10 +110,7 @@
// Test case with an invalid graph containing a non-branch last
// instruction in a block.
TEST_F(GraphCheckerTest, BlockEndingWithNonBranchInstruction) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateSimpleCFG(&allocator);
+ HGraph* graph = CreateSimpleCFG();
GraphChecker graph_checker(graph);
graph_checker.Run();
ASSERT_TRUE(graph_checker.IsValid());
diff --git a/compiler/optimizing/graph_test.cc b/compiler/optimizing/graph_test.cc
index 28ee3a5..29af808 100644
--- a/compiler/optimizing/graph_test.cc
+++ b/compiler/optimizing/graph_test.cc
@@ -24,43 +24,52 @@
namespace art {
-static HBasicBlock* createIfBlock(HGraph* graph, ArenaAllocator* allocator) {
- HBasicBlock* if_block = new (allocator) HBasicBlock(graph);
+class GraphTest : public OptimizingUnitTest {
+ protected:
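+ // Each helper allocates a block on the test arena and adds it to |graph|.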
+ HBasicBlock* CreateIfBlock(HGraph* graph);
+ HBasicBlock* CreateGotoBlock(HGraph* graph);
+ HBasicBlock* CreateEntryBlock(HGraph* graph);
+ HBasicBlock* CreateReturnBlock(HGraph* graph);
+ HBasicBlock* CreateExitBlock(HGraph* graph);
+};
+
+HBasicBlock* GraphTest::CreateIfBlock(HGraph* graph) {
+ HBasicBlock* if_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(if_block);
HInstruction* instr = graph->GetIntConstant(4);
- HInstruction* equal = new (allocator) HEqual(instr, instr);
+ HInstruction* equal = new (GetAllocator()) HEqual(instr, instr);
if_block->AddInstruction(equal);
- instr = new (allocator) HIf(equal);
+ instr = new (GetAllocator()) HIf(equal);
if_block->AddInstruction(instr);
return if_block;
}
-static HBasicBlock* createGotoBlock(HGraph* graph, ArenaAllocator* allocator) {
- HBasicBlock* block = new (allocator) HBasicBlock(graph);
+HBasicBlock* GraphTest::CreateGotoBlock(HGraph* graph) {
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
- HInstruction* got = new (allocator) HGoto();
+ HInstruction* got = new (GetAllocator()) HGoto();
block->AddInstruction(got);
return block;
}
-static HBasicBlock* createEntryBlock(HGraph* graph, ArenaAllocator* allocator) {
- HBasicBlock* block = createGotoBlock(graph, allocator);
+HBasicBlock* GraphTest::CreateEntryBlock(HGraph* graph) {
+ HBasicBlock* block = CreateGotoBlock(graph);
graph->SetEntryBlock(block);
return block;
}
-static HBasicBlock* createReturnBlock(HGraph* graph, ArenaAllocator* allocator) {
- HBasicBlock* block = new (allocator) HBasicBlock(graph);
+HBasicBlock* GraphTest::CreateReturnBlock(HGraph* graph) {
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
- HInstruction* return_instr = new (allocator) HReturnVoid();
+ HInstruction* return_instr = new (GetAllocator()) HReturnVoid();
block->AddInstruction(return_instr);
return block;
}
-static HBasicBlock* createExitBlock(HGraph* graph, ArenaAllocator* allocator) {
- HBasicBlock* block = new (allocator) HBasicBlock(graph);
+HBasicBlock* GraphTest::CreateExitBlock(HGraph* graph) {
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
- HInstruction* exit_instr = new (allocator) HExit();
+ HInstruction* exit_instr = new (GetAllocator()) HExit();
block->AddInstruction(exit_instr);
return block;
}
@@ -68,16 +77,13 @@
// Test that the successors of an if block stay consistent after a SimplifyCFG.
// This test sets the false block to be the return block.
-TEST(GraphTest, IfSuccessorSimpleJoinBlock1) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
- HBasicBlock* if_block = createIfBlock(graph, &allocator);
- HBasicBlock* if_true = createGotoBlock(graph, &allocator);
- HBasicBlock* return_block = createReturnBlock(graph, &allocator);
- HBasicBlock* exit_block = createExitBlock(graph, &allocator);
+TEST_F(GraphTest, IfSuccessorSimpleJoinBlock1) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry_block = CreateEntryBlock(graph);
+ HBasicBlock* if_block = CreateIfBlock(graph);
+ HBasicBlock* if_true = CreateGotoBlock(graph);
+ HBasicBlock* return_block = CreateReturnBlock(graph);
+ HBasicBlock* exit_block = CreateExitBlock(graph);
entry_block->AddSuccessor(if_block);
if_block->AddSuccessor(if_true);
@@ -103,16 +109,13 @@
// Test that the successors of an if block stay consistent after a SimplifyCFG.
// This test sets the true block to be the return block.
-TEST(GraphTest, IfSuccessorSimpleJoinBlock2) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
- HBasicBlock* if_block = createIfBlock(graph, &allocator);
- HBasicBlock* if_false = createGotoBlock(graph, &allocator);
- HBasicBlock* return_block = createReturnBlock(graph, &allocator);
- HBasicBlock* exit_block = createExitBlock(graph, &allocator);
+TEST_F(GraphTest, IfSuccessorSimpleJoinBlock2) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry_block = CreateEntryBlock(graph);
+ HBasicBlock* if_block = CreateIfBlock(graph);
+ HBasicBlock* if_false = CreateGotoBlock(graph);
+ HBasicBlock* return_block = CreateReturnBlock(graph);
+ HBasicBlock* exit_block = CreateExitBlock(graph);
entry_block->AddSuccessor(if_block);
if_block->AddSuccessor(return_block);
@@ -138,15 +141,12 @@
// Test that the successors of an if block stay consistent after a SimplifyCFG.
// This test sets the true block to be the loop header.
-TEST(GraphTest, IfSuccessorMultipleBackEdges1) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
- HBasicBlock* if_block = createIfBlock(graph, &allocator);
- HBasicBlock* return_block = createReturnBlock(graph, &allocator);
- HBasicBlock* exit_block = createExitBlock(graph, &allocator);
+TEST_F(GraphTest, IfSuccessorMultipleBackEdges1) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry_block = CreateEntryBlock(graph);
+ HBasicBlock* if_block = CreateIfBlock(graph);
+ HBasicBlock* return_block = CreateReturnBlock(graph);
+ HBasicBlock* exit_block = CreateExitBlock(graph);
entry_block->AddSuccessor(if_block);
if_block->AddSuccessor(if_block);
@@ -173,15 +173,12 @@
// Test that the successors of an if block stay consistent after a SimplifyCFG.
// This test sets the false block to be the loop header.
-TEST(GraphTest, IfSuccessorMultipleBackEdges2) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
- HBasicBlock* if_block = createIfBlock(graph, &allocator);
- HBasicBlock* return_block = createReturnBlock(graph, &allocator);
- HBasicBlock* exit_block = createExitBlock(graph, &allocator);
+TEST_F(GraphTest, IfSuccessorMultipleBackEdges2) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry_block = CreateEntryBlock(graph);
+ HBasicBlock* if_block = CreateIfBlock(graph);
+ HBasicBlock* return_block = CreateReturnBlock(graph);
+ HBasicBlock* exit_block = CreateExitBlock(graph);
entry_block->AddSuccessor(if_block);
if_block->AddSuccessor(return_block);
@@ -208,16 +205,13 @@
// Test that the successors of an if block stay consistent after a SimplifyCFG.
// This test sets the true block to be a loop header with multiple pre headers.
-TEST(GraphTest, IfSuccessorMultiplePreHeaders1) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
- HBasicBlock* first_if_block = createIfBlock(graph, &allocator);
- HBasicBlock* if_block = createIfBlock(graph, &allocator);
- HBasicBlock* loop_block = createGotoBlock(graph, &allocator);
- HBasicBlock* return_block = createReturnBlock(graph, &allocator);
+TEST_F(GraphTest, IfSuccessorMultiplePreHeaders1) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry_block = CreateEntryBlock(graph);
+ HBasicBlock* first_if_block = CreateIfBlock(graph);
+ HBasicBlock* if_block = CreateIfBlock(graph);
+ HBasicBlock* loop_block = CreateGotoBlock(graph);
+ HBasicBlock* return_block = CreateReturnBlock(graph);
entry_block->AddSuccessor(first_if_block);
first_if_block->AddSuccessor(if_block);
@@ -247,16 +241,13 @@
// Test that the successors of an if block stay consistent after a SimplifyCFG.
// This test sets the false block to be a loop header with multiple pre headers.
-TEST(GraphTest, IfSuccessorMultiplePreHeaders2) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
- HBasicBlock* first_if_block = createIfBlock(graph, &allocator);
- HBasicBlock* if_block = createIfBlock(graph, &allocator);
- HBasicBlock* loop_block = createGotoBlock(graph, &allocator);
- HBasicBlock* return_block = createReturnBlock(graph, &allocator);
+TEST_F(GraphTest, IfSuccessorMultiplePreHeaders2) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry_block = CreateEntryBlock(graph);
+ HBasicBlock* first_if_block = CreateIfBlock(graph);
+ HBasicBlock* if_block = CreateIfBlock(graph);
+ HBasicBlock* loop_block = CreateGotoBlock(graph);
+ HBasicBlock* return_block = CreateReturnBlock(graph);
entry_block->AddSuccessor(first_if_block);
first_if_block->AddSuccessor(if_block);
@@ -283,17 +274,14 @@
loop_block->GetLoopInformation()->GetPreHeader());
}
-TEST(GraphTest, InsertInstructionBefore) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* block = createGotoBlock(graph, &allocator);
+TEST_F(GraphTest, InsertInstructionBefore) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* block = CreateGotoBlock(graph);
HInstruction* got = block->GetLastInstruction();
ASSERT_TRUE(got->IsControlFlow());
// Test at the beginning of the block.
- HInstruction* first_instruction = new (&allocator) HIntConstant(4);
+ HInstruction* first_instruction = new (GetAllocator()) HIntConstant(4);
block->InsertInstructionBefore(first_instruction, got);
ASSERT_NE(first_instruction->GetId(), -1);
@@ -306,7 +294,7 @@
ASSERT_EQ(got->GetPrevious(), first_instruction);
// Test in the middle of the block.
- HInstruction* second_instruction = new (&allocator) HIntConstant(4);
+ HInstruction* second_instruction = new (GetAllocator()) HIntConstant(4);
block->InsertInstructionBefore(second_instruction, got);
ASSERT_NE(second_instruction->GetId(), -1);
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 8ea312d..c09e5df 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -566,7 +566,7 @@
}
void GVNOptimization::Run() {
- GlobalValueNumberer gvn(graph_->GetArena(), graph_, side_effects_);
+ GlobalValueNumberer gvn(graph_->GetAllocator(), graph_, side_effects_);
gvn.Run();
}
diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc
index ac0dbee..3bf4cc3 100644
--- a/compiler/optimizing/gvn_test.cc
+++ b/compiler/optimizing/gvn_test.cc
@@ -24,77 +24,74 @@
namespace art {
-class GVNTest : public CommonCompilerTest {};
+class GVNTest : public OptimizingUnitTest {};
TEST_F(GVNTest, LocalFieldElimination) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kReference);
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(graph->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kReference);
entry->AddInstruction(parameter);
- HBasicBlock* block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
entry->AddSuccessor(block);
- block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kReference,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
- block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kReference,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ block->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kReference,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
+ block->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kReference,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
HInstruction* to_remove = block->GetLastInstruction();
- block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kReference,
- MemberOffset(43),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ block->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kReference,
+ MemberOffset(43),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
HInstruction* different_offset = block->GetLastInstruction();
// Kill the value.
- block->AddInstruction(new (&allocator) HInstanceFieldSet(parameter,
- parameter,
- nullptr,
- DataType::Type::kReference,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
- block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kReference,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ block->AddInstruction(new (GetAllocator()) HInstanceFieldSet(parameter,
+ parameter,
+ nullptr,
+ DataType::Type::kReference,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
+ block->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kReference,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
HInstruction* use_after_kill = block->GetLastInstruction();
- block->AddInstruction(new (&allocator) HExit());
+ block->AddInstruction(new (GetAllocator()) HExit());
ASSERT_EQ(to_remove->GetBlock(), block);
ASSERT_EQ(different_offset->GetBlock(), block);
@@ -111,36 +108,33 @@
}
TEST_F(GVNTest, GlobalFieldElimination) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kReference);
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(graph->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kReference);
entry->AddInstruction(parameter);
- HBasicBlock* block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
entry->AddSuccessor(block);
- block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ block->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
- block->AddInstruction(new (&allocator) HIf(block->GetLastInstruction()));
- HBasicBlock* then = new (&allocator) HBasicBlock(graph);
- HBasicBlock* else_ = new (&allocator) HBasicBlock(graph);
- HBasicBlock* join = new (&allocator) HBasicBlock(graph);
+ block->AddInstruction(new (GetAllocator()) HIf(block->GetLastInstruction()));
+ HBasicBlock* then = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* else_ = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* join = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(then);
graph->AddBlock(else_);
graph->AddBlock(join);
@@ -150,36 +144,36 @@
then->AddSuccessor(join);
else_->AddSuccessor(join);
- then->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
- then->AddInstruction(new (&allocator) HGoto());
- else_->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
- else_->AddInstruction(new (&allocator) HGoto());
- join->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
- join->AddInstruction(new (&allocator) HExit());
+ then->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
+ then->AddInstruction(new (GetAllocator()) HGoto());
+ else_->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
+ else_->AddInstruction(new (GetAllocator()) HGoto());
+ join->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
+ join->AddInstruction(new (GetAllocator()) HExit());
graph->BuildDominatorTree();
SideEffectsAnalysis side_effects(graph);
@@ -193,37 +187,34 @@
}
TEST_F(GVNTest, LoopFieldElimination) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kReference);
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(graph->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kReference);
entry->AddInstruction(parameter);
- HBasicBlock* block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
entry->AddSuccessor(block);
- block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
- block->AddInstruction(new (&allocator) HGoto());
+ block->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
+ block->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* loop_header = new (&allocator) HBasicBlock(graph);
- HBasicBlock* loop_body = new (&allocator) HBasicBlock(graph);
- HBasicBlock* exit = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* loop_header = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* loop_body = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(loop_header);
graph->AddBlock(loop_body);
@@ -233,54 +224,54 @@
loop_header->AddSuccessor(exit);
loop_body->AddSuccessor(loop_header);
- loop_header->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ loop_header->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
HInstruction* field_get_in_loop_header = loop_header->GetLastInstruction();
- loop_header->AddInstruction(new (&allocator) HIf(block->GetLastInstruction()));
+ loop_header->AddInstruction(new (GetAllocator()) HIf(block->GetLastInstruction()));
// Kill inside the loop body to prevent the field gets in the loop header
// and in the body from being GVN'ed.
- loop_body->AddInstruction(new (&allocator) HInstanceFieldSet(parameter,
- parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ loop_body->AddInstruction(new (GetAllocator()) HInstanceFieldSet(parameter,
+ parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
HInstruction* field_set = loop_body->GetLastInstruction();
- loop_body->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ loop_body->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
HInstruction* field_get_in_loop_body = loop_body->GetLastInstruction();
- loop_body->AddInstruction(new (&allocator) HGoto());
+ loop_body->AddInstruction(new (GetAllocator()) HGoto());
- exit->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ exit->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
HInstruction* field_get_in_exit = exit->GetLastInstruction();
- exit->AddInstruction(new (&allocator) HExit());
+ exit->AddInstruction(new (GetAllocator()) HExit());
ASSERT_EQ(field_get_in_loop_header->GetBlock(), loop_header);
ASSERT_EQ(field_get_in_loop_body->GetBlock(), loop_body);
@@ -315,22 +306,19 @@
// Test that inner loops affect the side effects of the outer loop.
TEST_F(GVNTest, LoopSideEffects) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
static const SideEffects kCanTriggerGC = SideEffects::CanTriggerGC();
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HBasicBlock* outer_loop_header = new (&allocator) HBasicBlock(graph);
- HBasicBlock* outer_loop_body = new (&allocator) HBasicBlock(graph);
- HBasicBlock* outer_loop_exit = new (&allocator) HBasicBlock(graph);
- HBasicBlock* inner_loop_header = new (&allocator) HBasicBlock(graph);
- HBasicBlock* inner_loop_body = new (&allocator) HBasicBlock(graph);
- HBasicBlock* inner_loop_exit = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* outer_loop_header = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* outer_loop_body = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* outer_loop_exit = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* inner_loop_header = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* inner_loop_body = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* inner_loop_exit = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(outer_loop_header);
graph->AddBlock(outer_loop_body);
@@ -348,20 +336,20 @@
inner_loop_body->AddSuccessor(inner_loop_header);
inner_loop_exit->AddSuccessor(outer_loop_header);
- HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kBool);
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(graph->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kBool);
entry->AddInstruction(parameter);
- entry->AddInstruction(new (&allocator) HGoto());
- outer_loop_header->AddInstruction(new (&allocator) HSuspendCheck());
- outer_loop_header->AddInstruction(new (&allocator) HIf(parameter));
- outer_loop_body->AddInstruction(new (&allocator) HGoto());
- inner_loop_header->AddInstruction(new (&allocator) HSuspendCheck());
- inner_loop_header->AddInstruction(new (&allocator) HIf(parameter));
- inner_loop_body->AddInstruction(new (&allocator) HGoto());
- inner_loop_exit->AddInstruction(new (&allocator) HGoto());
- outer_loop_exit->AddInstruction(new (&allocator) HExit());
+ entry->AddInstruction(new (GetAllocator()) HGoto());
+ outer_loop_header->AddInstruction(new (GetAllocator()) HSuspendCheck());
+ outer_loop_header->AddInstruction(new (GetAllocator()) HIf(parameter));
+ outer_loop_body->AddInstruction(new (GetAllocator()) HGoto());
+ inner_loop_header->AddInstruction(new (GetAllocator()) HSuspendCheck());
+ inner_loop_header->AddInstruction(new (GetAllocator()) HIf(parameter));
+ inner_loop_body->AddInstruction(new (GetAllocator()) HGoto());
+ inner_loop_exit->AddInstruction(new (GetAllocator()) HGoto());
+ outer_loop_exit->AddInstruction(new (GetAllocator()) HExit());
graph->BuildDominatorTree();
@@ -371,16 +359,16 @@
// Check that the only side effect of loops is to potentially trigger GC.
{
// Make one block with a side effect.
- entry->AddInstruction(new (&allocator) HInstanceFieldSet(parameter,
- parameter,
- nullptr,
- DataType::Type::kReference,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ entry->AddInstruction(new (GetAllocator()) HInstanceFieldSet(parameter,
+ parameter,
+ nullptr,
+ DataType::Type::kReference,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
SideEffectsAnalysis side_effects(graph);
side_effects.Run();
@@ -396,16 +384,16 @@
// Check that the side effects of the outer loop do not affect the inner loop.
{
outer_loop_body->InsertInstructionBefore(
- new (&allocator) HInstanceFieldSet(parameter,
- parameter,
- nullptr,
- DataType::Type::kReference,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0),
+ new (GetAllocator()) HInstanceFieldSet(parameter,
+ parameter,
+ nullptr,
+ DataType::Type::kReference,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0),
outer_loop_body->GetLastInstruction());
SideEffectsAnalysis side_effects(graph);
@@ -422,16 +410,16 @@
{
outer_loop_body->RemoveInstruction(outer_loop_body->GetFirstInstruction());
inner_loop_body->InsertInstructionBefore(
- new (&allocator) HInstanceFieldSet(parameter,
- parameter,
- nullptr,
- DataType::Type::kReference,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0),
+ new (GetAllocator()) HInstanceFieldSet(parameter,
+ parameter,
+ nullptr,
+ DataType::Type::kReference,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0),
inner_loop_body->GetLastInstruction());
SideEffectsAnalysis side_effects(graph);
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index eab17aa..0987293 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -100,17 +100,17 @@
HInductionVarAnalysis::HInductionVarAnalysis(HGraph* graph)
: HOptimization(graph, kInductionPassName),
global_depth_(0),
- stack_(graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+ stack_(graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)),
map_(std::less<HInstruction*>(),
- graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
- scc_(graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+ graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)),
+ scc_(graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)),
cycle_(std::less<HInstruction*>(),
- graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+ graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)),
type_(DataType::Type::kVoid),
induction_(std::less<HLoopInformation*>(),
- graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+ graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)),
cycles_(std::less<HPhi*>(),
- graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)) {
+ graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)) {
}
void HInductionVarAnalysis::Run() {
@@ -265,7 +265,8 @@
// Rotate proper loop-phi to front.
if (size > 1) {
- ArenaVector<HInstruction*> other(graph_->GetArena()->Adapter(kArenaAllocInductionVarAnalysis));
+ ArenaVector<HInstruction*> other(
+ graph_->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis));
RotateEntryPhiFirst(loop, &scc_, &other);
}
@@ -991,7 +992,7 @@
it = induction_.Put(loop,
ArenaSafeMap<HInstruction*, InductionInfo*>(
std::less<HInstruction*>(),
- graph_->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)));
+ graph_->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)));
}
it->second.Put(instruction, info);
}
@@ -1082,7 +1083,7 @@
return CreateSimplifiedInvariant(kSub, b->op_b, b->op_a);
}
}
- return new (graph_->GetArena()) InductionInfo(
+ return new (graph_->GetAllocator()) InductionInfo(
kInvariant, op, a, b, nullptr, ImplicitConversion(b->type));
}
@@ -1119,7 +1120,7 @@
void HInductionVarAnalysis::AssignCycle(HPhi* phi) {
ArenaSet<HInstruction*>* set = &cycles_.Put(phi, ArenaSet<HInstruction*>(
- graph_->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)))->second;
+ graph_->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)))->second;
for (HInstruction* i : scc_) {
set->insert(i);
}
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index 421b3ab..a2d302a 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -129,7 +129,7 @@
InductionInfo* CreateInvariantFetch(HInstruction* f) {
DCHECK(f != nullptr);
- return new (graph_->GetArena())
+ return new (graph_->GetAllocator())
InductionInfo(kInvariant, kFetch, nullptr, nullptr, f, f->GetType());
}
@@ -138,7 +138,7 @@
InductionInfo* b,
DataType::Type type) {
DCHECK(a != nullptr && b != nullptr);
- return new (graph_->GetArena()) InductionInfo(kInvariant, op, a, b, nullptr, type);
+ return new (graph_->GetAllocator()) InductionInfo(kInvariant, op, a, b, nullptr, type);
}
InductionInfo* CreateInduction(InductionClass ic,
@@ -148,7 +148,7 @@
HInstruction* f,
DataType::Type type) {
DCHECK(a != nullptr && b != nullptr);
- return new (graph_->GetArena()) InductionInfo(ic, op, a, b, f, type);
+ return new (graph_->GetAllocator()) InductionInfo(ic, op, a, b, f, type);
}
// Methods for analysis.
diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc
index 53c8044..4c11ad4 100644
--- a/compiler/optimizing/induction_var_analysis_test.cc
+++ b/compiler/optimizing/induction_var_analysis_test.cc
@@ -27,12 +27,10 @@
/**
* Fixture class for the InductionVarAnalysis tests.
*/
-class InductionVarAnalysisTest : public CommonCompilerTest {
+class InductionVarAnalysisTest : public OptimizingUnitTest {
public:
InductionVarAnalysisTest()
- : pool_(),
- allocator_(&pool_),
- iva_(nullptr),
+ : iva_(nullptr),
entry_(nullptr),
return_(nullptr),
exit_(nullptr),
@@ -44,7 +42,7 @@
constant100_(nullptr),
constantm1_(nullptr),
float_constant0_(nullptr) {
- graph_ = CreateGraph(&allocator_);
+ graph_ = CreateGraph();
}
~InductionVarAnalysisTest() { }
@@ -52,15 +50,15 @@
// Builds a single for-loop at depth d.
void BuildForLoop(int d, int n) {
ASSERT_LT(d, n);
- loop_preheader_[d] = new (&allocator_) HBasicBlock(graph_);
+ loop_preheader_[d] = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(loop_preheader_[d]);
- loop_header_[d] = new (&allocator_) HBasicBlock(graph_);
+ loop_header_[d] = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(loop_header_[d]);
loop_preheader_[d]->AddSuccessor(loop_header_[d]);
if (d < (n - 1)) {
BuildForLoop(d + 1, n);
}
- loop_body_[d] = new (&allocator_) HBasicBlock(graph_);
+ loop_body_[d] = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(loop_body_[d]);
loop_body_[d]->AddSuccessor(loop_header_[d]);
if (d < (n - 1)) {
@@ -79,12 +77,12 @@
graph_->SetNumberOfVRegs(n + 3);
// Build basic blocks with entry, nested loop, exit.
- entry_ = new (&allocator_) HBasicBlock(graph_);
+ entry_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry_);
BuildForLoop(0, n);
- return_ = new (&allocator_) HBasicBlock(graph_);
+ return_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(return_);
- exit_ = new (&allocator_) HBasicBlock(graph_);
+ exit_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(exit_);
entry_->AddSuccessor(loop_preheader_[0]);
loop_header_[0]->AddSuccessor(return_);
@@ -93,7 +91,7 @@
graph_->SetExitBlock(exit_);
// Provide entry and exit instructions.
- parameter_ = new (&allocator_) HParameterValue(
+ parameter_ = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference, true);
entry_->AddInstruction(parameter_);
constant0_ = graph_->GetIntConstant(0);
@@ -103,20 +101,20 @@
constant100_ = graph_->GetIntConstant(100);
constantm1_ = graph_->GetIntConstant(-1);
float_constant0_ = graph_->GetFloatConstant(0.0f);
- return_->AddInstruction(new (&allocator_) HReturnVoid());
- exit_->AddInstruction(new (&allocator_) HExit());
+ return_->AddInstruction(new (GetAllocator()) HReturnVoid());
+ exit_->AddInstruction(new (GetAllocator()) HExit());
// Provide loop instructions.
for (int d = 0; d < n; d++) {
- basic_[d] = new (&allocator_) HPhi(&allocator_, d, 0, DataType::Type::kInt32);
- loop_preheader_[d]->AddInstruction(new (&allocator_) HGoto());
+ basic_[d] = new (GetAllocator()) HPhi(GetAllocator(), d, 0, DataType::Type::kInt32);
+ loop_preheader_[d]->AddInstruction(new (GetAllocator()) HGoto());
loop_header_[d]->AddPhi(basic_[d]);
- HInstruction* compare = new (&allocator_) HLessThan(basic_[d], constant100_);
+ HInstruction* compare = new (GetAllocator()) HLessThan(basic_[d], constant100_);
loop_header_[d]->AddInstruction(compare);
- loop_header_[d]->AddInstruction(new (&allocator_) HIf(compare));
- increment_[d] = new (&allocator_) HAdd(DataType::Type::kInt32, basic_[d], constant1_);
+ loop_header_[d]->AddInstruction(new (GetAllocator()) HIf(compare));
+ increment_[d] = new (GetAllocator()) HAdd(DataType::Type::kInt32, basic_[d], constant1_);
loop_body_[d]->AddInstruction(increment_[d]);
- loop_body_[d]->AddInstruction(new (&allocator_) HGoto());
+ loop_body_[d]->AddInstruction(new (GetAllocator()) HGoto());
basic_[d]->AddInput(constant0_);
basic_[d]->AddInput(increment_[d]);
@@ -125,9 +123,9 @@
// Builds an if-statement at depth d.
HPhi* BuildIf(int d, HBasicBlock** ifT, HBasicBlock** ifF) {
- HBasicBlock* cond = new (&allocator_) HBasicBlock(graph_);
- HBasicBlock* ifTrue = new (&allocator_) HBasicBlock(graph_);
- HBasicBlock* ifFalse = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* cond = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* ifTrue = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* ifFalse = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(cond);
graph_->AddBlock(ifTrue);
graph_->AddBlock(ifFalse);
@@ -137,11 +135,11 @@
cond->AddSuccessor(ifFalse);
ifTrue->AddSuccessor(loop_body_[d]);
ifFalse->AddSuccessor(loop_body_[d]);
- cond->AddInstruction(new (&allocator_) HIf(parameter_));
+ cond->AddInstruction(new (GetAllocator()) HIf(parameter_));
*ifT = ifTrue;
*ifF = ifFalse;
- HPhi* select_phi = new (&allocator_) HPhi(&allocator_, -1, 0, DataType::Type::kInt32);
+ HPhi* select_phi = new (GetAllocator()) HPhi(GetAllocator(), -1, 0, DataType::Type::kInt32);
loop_body_[d]->AddPhi(select_phi);
return select_phi;
}
@@ -154,7 +152,7 @@
// Inserts a phi into the loop header at depth d and returns it.
HPhi* InsertLoopPhi(int vreg, int d) {
- HPhi* phi = new (&allocator_) HPhi(&allocator_, vreg, 0, DataType::Type::kInt32);
+ HPhi* phi = new (GetAllocator()) HPhi(GetAllocator(), vreg, 0, DataType::Type::kInt32);
loop_header_[d]->AddPhi(phi);
return phi;
}
@@ -164,7 +162,7 @@
HInstruction* InsertArrayStore(HInstruction* subscript, int d) {
// ArraySet is given a float value in order to avoid SsaBuilder typing
// it from the array's non-existent reference type info.
- return InsertInstruction(new (&allocator_) HArraySet(
+ return InsertInstruction(new (GetAllocator()) HArraySet(
parameter_, subscript, float_constant0_, DataType::Type::kFloat32, 0), d);
}
@@ -197,13 +195,11 @@
// Performs InductionVarAnalysis (after proper set up).
void PerformInductionVarAnalysis() {
graph_->BuildDominatorTree();
- iva_ = new (&allocator_) HInductionVarAnalysis(graph_);
+ iva_ = new (GetAllocator()) HInductionVarAnalysis(graph_);
iva_->Run();
}
// General building fields.
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
HInductionVarAnalysis* iva_;
@@ -286,15 +282,15 @@
// }
BuildLoopNest(1);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, constant100_, basic_[0]), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, constant100_, basic_[0]), 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, constant100_, basic_[0]), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, constant100_, basic_[0]), 0);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, constant100_, basic_[0]), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, constant100_, basic_[0]), 0);
HInstruction* shl = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, basic_[0], constant1_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, basic_[0], constant1_), 0);
HInstruction* neg = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, basic_[0]), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, basic_[0]), 0);
PerformInductionVarAnalysis();
EXPECT_STREQ("((1) * i + (100)):Int32", GetInductionInfo(add, 0).c_str());
@@ -318,10 +314,10 @@
k_header->AddInput(constant0_);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
HInstruction* store1 = InsertArrayStore(add, 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, add, constant1_), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, add, constant1_), 0);
HInstruction* store2 = InsertArrayStore(sub, 0);
k_header->AddInput(sub);
PerformInductionVarAnalysis();
@@ -351,11 +347,11 @@
HPhi* k_body = BuildIf(0, &ifTrue, &ifFalse);
// True-branch.
- HInstruction* inc1 = new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant1_);
+ HInstruction* inc1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant1_);
ifTrue->AddInstruction(inc1);
k_body->AddInput(inc1);
// False-branch.
- HInstruction* inc2 = new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant1_);
+ HInstruction* inc2 = new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant1_);
ifFalse->AddInstruction(inc2);
k_body->AddInput(inc2);
// Merge over a phi.
@@ -384,11 +380,11 @@
HPhi* k = BuildIf(0, &ifTrue, &ifFalse);
// True-branch.
- HInstruction* inc1 = new (&allocator_) HAdd(DataType::Type::kInt32, basic_[0], constant1_);
+ HInstruction* inc1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, basic_[0], constant1_);
ifTrue->AddInstruction(inc1);
k->AddInput(inc1);
// False-branch.
- HInstruction* inc2 = new (&allocator_) HAdd(DataType::Type::kInt32, basic_[0], constant1_);
+ HInstruction* inc2 = new (GetAllocator()) HAdd(DataType::Type::kInt32, basic_[0], constant1_);
ifFalse->AddInstruction(inc2);
k->AddInput(inc2);
// Merge over a phi.
@@ -412,11 +408,11 @@
BuildLoopNest(1);
HInstruction* add1 = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, basic_[0], basic_[0]), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, basic_[0], basic_[0]), 0);
HInstruction* add2 = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, constant7_, basic_[0]), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, constant7_, basic_[0]), 0);
HInstruction* add3 = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, add1, add2), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, add1, add2), 0);
PerformInductionVarAnalysis();
EXPECT_STREQ("((1) * i + (0)):Int32", GetInductionInfo(basic_[0], 0).c_str());
@@ -438,11 +434,11 @@
k_header->AddInput(constant1_);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, basic_[0], constant2_), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, basic_[0], constant2_), 0);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, constant100_, mul), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, constant100_, mul), 0);
HInstruction* pol = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, add, k_header), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, add, k_header), 0);
k_header->AddInput(pol);
PerformInductionVarAnalysis();
@@ -469,17 +465,17 @@
k_header->AddInput(constant1_);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, k_header, constant1_), 0);
HInstruction* neg = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, sub), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, sub), 0);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, k_header, constant2_), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, k_header, constant2_), 0);
HInstruction* shl = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, k_header, constant2_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, k_header, constant2_), 0);
HInstruction* pol = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, basic_[0]), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, basic_[0]), 0);
k_header->AddInput(pol);
PerformInductionVarAnalysis();
@@ -512,11 +508,11 @@
k_header->AddInput(constant7_);
HInstruction* add1 = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, k_header), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, k_header), 0);
HInstruction* add2 = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, add1, k_header), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, add1, k_header), 0);
HInstruction* add3 = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, basic_[0]), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, basic_[0]), 0);
k_header->AddInput(add3);
PerformInductionVarAnalysis();
@@ -542,7 +538,7 @@
k_header->AddInput(constant1_);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, k_header, constant100_), 0);
k_header->AddInput(mul);
PerformInductionVarAnalysis();
@@ -567,19 +563,19 @@
k_header->AddInput(constant1_);
HInstruction* add1 = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant1_), 0);
HInstruction* shl1 = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, k_header, constant1_), 0);
HInstruction* add2 = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, shl1, constant100_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, shl1, constant100_), 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, shl1, constant1_), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, shl1, constant1_), 0);
HInstruction* neg = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, sub), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, sub), 0);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, shl1, constant2_), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, shl1, constant2_), 0);
HInstruction* shl2 = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, shl1, constant2_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, shl1, constant2_), 0);
k_header->AddInput(shl1);
PerformInductionVarAnalysis();
@@ -610,17 +606,17 @@
k_header->AddInput(constant1_);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, k_header, constant1_), 0);
HInstruction* neg = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, sub), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, sub), 0);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, k_header, constant2_), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, k_header, constant2_), 0);
HInstruction* shl = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, k_header, constant2_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, k_header, constant2_), 0);
HInstruction* div = InsertInstruction(
- new (&allocator_) HDiv(DataType::Type::kInt32, k_header, constant100_, kNoDexPc), 0);
+ new (GetAllocator()) HDiv(DataType::Type::kInt32, k_header, constant100_, kNoDexPc), 0);
k_header->AddInput(div);
PerformInductionVarAnalysis();
@@ -645,7 +641,7 @@
k_header->AddInput(constant100_);
HInstruction* shr = InsertInstruction(
- new (&allocator_) HShr(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HShr(DataType::Type::kInt32, k_header, constant1_), 0);
k_header->AddInput(shr);
PerformInductionVarAnalysis();
@@ -665,7 +661,7 @@
k_header->AddInput(constantm1_);
HInstruction* shr = InsertInstruction(
- new (&allocator_) HShr(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HShr(DataType::Type::kInt32, k_header, constant1_), 0);
k_header->AddInput(shr);
PerformInductionVarAnalysis();
@@ -689,17 +685,17 @@
k_header->AddInput(constant100_);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, k_header, constant1_), 0);
HInstruction* neg = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, sub), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, sub), 0);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, k_header, constant2_), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, k_header, constant2_), 0);
HInstruction* shl = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, k_header, constant2_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, k_header, constant2_), 0);
HInstruction* rem = InsertInstruction(
- new (&allocator_) HRem(DataType::Type::kInt32, k_header, constant7_, kNoDexPc), 0);
+ new (GetAllocator()) HRem(DataType::Type::kInt32, k_header, constant7_, kNoDexPc), 0);
k_header->AddInput(rem);
PerformInductionVarAnalysis();
@@ -731,7 +727,7 @@
HInstruction* store = InsertArrayStore(k_header, 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, constant100_, basic_[0]), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, constant100_, basic_[0]), 0);
k_header->AddInput(sub);
PerformInductionVarAnalysis();
@@ -760,7 +756,7 @@
HInstruction* store = InsertArrayStore(k_header, 0);
k_header->AddInput(t);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, constant100_, basic_[0], 0), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, constant100_, basic_[0], 0), 0);
t->AddInput(sub);
PerformInductionVarAnalysis();
@@ -785,19 +781,19 @@
k_header->AddInput(constant0_);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, k_header, constant100_), 0);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, k_header, constant100_), 0);
HInstruction* shl1 = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, k_header, constant1_), 0);
HInstruction* neg1 = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, k_header), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, k_header), 0);
HInstruction* shl2 = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, basic_[0], constant1_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, basic_[0], constant1_), 0);
HInstruction* neg2 = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, shl2), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, shl2), 0);
k_header->AddInput(shl2);
PerformInductionVarAnalysis();
@@ -856,7 +852,7 @@
HInstruction* store = InsertArrayStore(k_header, 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, constant1_, k_header), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, constant1_, k_header), 0);
k_header->AddInput(sub);
PerformInductionVarAnalysis();
@@ -877,7 +873,7 @@
HInstruction* store = InsertArrayStore(k_header, 0);
HInstruction* x = InsertInstruction(
- new (&allocator_) HXor(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HXor(DataType::Type::kInt32, k_header, constant1_), 0);
k_header->AddInput(x);
PerformInductionVarAnalysis();
@@ -896,7 +892,7 @@
k_header->AddInput(constant1_);
HInstruction* x = InsertInstruction(
- new (&allocator_) HXor(DataType::Type::kInt32, constant1_, k_header), 0);
+ new (GetAllocator()) HXor(DataType::Type::kInt32, constant1_, k_header), 0);
k_header->AddInput(x);
PerformInductionVarAnalysis();
@@ -915,7 +911,7 @@
k_header->AddInput(constant1_);
HInstruction* x = InsertInstruction(
- new (&allocator_) HXor(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HXor(DataType::Type::kInt32, k_header, constant100_), 0);
k_header->AddInput(x);
PerformInductionVarAnalysis();
@@ -933,7 +929,7 @@
HPhi* k_header = InsertLoopPhi(0, 0);
k_header->AddInput(constant0_);
- HInstruction* x = InsertInstruction(new (&allocator_) HEqual(k_header, constant0_), 0);
+ HInstruction* x = InsertInstruction(new (GetAllocator()) HEqual(k_header, constant0_), 0);
k_header->AddInput(x);
PerformInductionVarAnalysis();
@@ -951,7 +947,7 @@
HPhi* k_header = InsertLoopPhi(0, 0);
k_header->AddInput(constant0_);
- HInstruction* x = InsertInstruction(new (&allocator_) HEqual(constant0_, k_header), 0);
+ HInstruction* x = InsertInstruction(new (GetAllocator()) HEqual(constant0_, k_header), 0);
k_header->AddInput(x);
PerformInductionVarAnalysis();
@@ -969,7 +965,7 @@
HPhi* k_header = InsertLoopPhi(0, 0);
k_header->AddInput(constant0_);
- HInstruction* x = InsertInstruction(new (&allocator_) HNotEqual(k_header, constant1_), 0);
+ HInstruction* x = InsertInstruction(new (GetAllocator()) HNotEqual(k_header, constant1_), 0);
k_header->AddInput(x);
PerformInductionVarAnalysis();
@@ -987,7 +983,7 @@
HPhi* k_header = InsertLoopPhi(0, 0);
k_header->AddInput(constant0_);
- HInstruction* x = InsertInstruction(new (&allocator_) HNotEqual(constant1_, k_header), 0);
+ HInstruction* x = InsertInstruction(new (GetAllocator()) HNotEqual(constant1_, k_header), 0);
k_header->AddInput(x);
PerformInductionVarAnalysis();
@@ -1012,19 +1008,19 @@
k_header->AddInput(constant0_);
HInstruction* neg1 = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, k_header), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, k_header), 0);
HInstruction* idiom = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, constant1_, k_header), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, constant1_, k_header), 0);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, idiom, constant100_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, idiom, constant100_), 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, idiom, constant100_), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, idiom, constant100_), 0);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, idiom, constant100_), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, idiom, constant100_), 0);
HInstruction* shl = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, idiom, constant1_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, idiom, constant1_), 0);
HInstruction* neg2 = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, idiom), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, idiom), 0);
k_header->AddInput(idiom);
PerformInductionVarAnalysis();
@@ -1057,7 +1053,7 @@
}
HInstruction* inc = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, constant1_, k_header[9]), 9);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, constant1_, k_header[9]), 9);
HInstruction* store = InsertArrayStore(inc, 9);
for (int d = 0; d < 10; d++) {
@@ -1091,7 +1087,7 @@
// }
BuildLoopNest(1);
HInstruction* conv = InsertInstruction(
- new (&allocator_) HTypeConversion(DataType::Type::kInt8, basic_[0], kNoDexPc), 0);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, basic_[0], kNoDexPc), 0);
HInstruction* store1 = InsertArrayStore(conv, 0);
HInstruction* store2 = InsertArrayStore(basic_[0], 0);
PerformInductionVarAnalysis();
@@ -1122,10 +1118,10 @@
// }
BuildLoopNest(1);
HInstruction* conv = InsertInstruction(
- new (&allocator_) HTypeConversion(DataType::Type::kInt8, basic_[0], kNoDexPc), 0);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, basic_[0], kNoDexPc), 0);
HInstruction* store1 = InsertArrayStore(conv, 0);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, conv, constant1_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, conv, constant1_), 0);
HInstruction* store2 = InsertArrayStore(add, 0);
PerformInductionVarAnalysis();
@@ -1152,9 +1148,9 @@
k_header->AddInput(graph_->GetIntConstant(-128));
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant1_), 0);
HInstruction* conv = InsertInstruction(
- new (&allocator_) HTypeConversion(DataType::Type::kInt8, add, kNoDexPc), 0);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, add, kNoDexPc), 0);
k_header->AddInput(conv);
PerformInductionVarAnalysis();
@@ -1180,9 +1176,9 @@
k_header->AddInput(graph_->GetIntConstant(-129));
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant1_), 0);
HInstruction* conv = InsertInstruction(
- new (&allocator_) HTypeConversion(DataType::Type::kInt8, add, kNoDexPc), 0);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, add, kNoDexPc), 0);
k_header->AddInput(conv);
PerformInductionVarAnalysis();
@@ -1202,9 +1198,9 @@
k_header->AddInput(constant0_);
HInstruction* conv = InsertInstruction(
- new (&allocator_) HTypeConversion(DataType::Type::kInt8, k_header, kNoDexPc), 0);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, k_header, kNoDexPc), 0);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, conv, constant1_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, conv, constant1_), 0);
k_header->AddInput(add);
PerformInductionVarAnalysis();
@@ -1221,7 +1217,7 @@
HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious();
ifs->ReplaceInput(graph_->GetIntConstant(127), 1);
HInstruction* conv =
- new (&allocator_) HTypeConversion(DataType::Type::kInt8, increment_[0], kNoDexPc);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, increment_[0], kNoDexPc);
loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext());
basic_[0]->ReplaceInput(conv, 1);
PerformInductionVarAnalysis();
@@ -1247,7 +1243,7 @@
HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious();
ifs->ReplaceInput(graph_->GetIntConstant(128), 1);
HInstruction* conv =
- new (&allocator_) HTypeConversion(DataType::Type::kInt8, increment_[0], kNoDexPc);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, increment_[0], kNoDexPc);
loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext());
basic_[0]->ReplaceInput(conv, 1);
PerformInductionVarAnalysis();
@@ -1273,7 +1269,7 @@
HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious();
ifs->ReplaceInput(graph_->GetIntConstant(32767), 1);
HInstruction* conv =
- new (&allocator_) HTypeConversion(DataType::Type::kInt16, increment_[0], kNoDexPc);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt16, increment_[0], kNoDexPc);
loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext());
basic_[0]->ReplaceInput(conv, 1);
PerformInductionVarAnalysis();
@@ -1299,7 +1295,7 @@
HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious();
ifs->ReplaceInput(graph_->GetIntConstant(32768), 1);
HInstruction* conv =
- new (&allocator_) HTypeConversion(DataType::Type::kInt16, increment_[0], kNoDexPc);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt16, increment_[0], kNoDexPc);
loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext());
basic_[0]->ReplaceInput(conv, 1);
PerformInductionVarAnalysis();
@@ -1324,7 +1320,7 @@
HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious();
ifs->ReplaceInput(graph_->GetIntConstant(65535), 1);
HInstruction* conv =
- new (&allocator_) HTypeConversion(DataType::Type::kUint16, increment_[0], kNoDexPc);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kUint16, increment_[0], kNoDexPc);
loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext());
basic_[0]->ReplaceInput(conv, 1);
PerformInductionVarAnalysis();
@@ -1349,7 +1345,7 @@
HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious();
ifs->ReplaceInput(graph_->GetIntConstant(65536), 1);
HInstruction* conv =
- new (&allocator_) HTypeConversion(DataType::Type::kUint16, increment_[0], kNoDexPc);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kUint16, increment_[0], kNoDexPc);
loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext());
basic_[0]->ReplaceInput(conv, 1);
PerformInductionVarAnalysis();
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index ab6fbae..99dec11 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -418,7 +418,8 @@
if (GenerateCode(trip->op_a, nullptr, graph, block, &trip_expr, false, false)) {
if (taken_test != nullptr) {
HInstruction* zero = graph->GetConstant(trip->type, 0);
- trip_expr = Insert(block, new (graph->GetArena()) HSelect(taken_test, trip_expr, zero, kNoDexPc));
+ ArenaAllocator* allocator = graph->GetAllocator();
+ trip_expr = Insert(block, new (allocator) HSelect(taken_test, trip_expr, zero, kNoDexPc));
}
return trip_expr;
}
@@ -1059,7 +1060,7 @@
sum = static_cast<int32_t>(sum); // okay to truncate
}
*result =
- Insert(block, new (graph->GetArena()) HAdd(type, graph->GetConstant(type, sum), c));
+ Insert(block, new (graph->GetAllocator()) HAdd(type, graph->GetConstant(type, sum), c));
}
return true;
}
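// Editor's sketch (standalone, not ART code): why "okay to truncate" holds
// above. Assuming two's-complement wrap for the int32_t cast (guaranteed
// since C++20), reducing the folded 64-bit constant mod 2^32 matches what
// the generated 32-bit HAdd would compute at runtime.
#include <cstdint>
#include <iostream>

int main() {
  int32_t a = 2000000000;
  int32_t b = 2000000000;
  int64_t wide = int64_t{a} + b;                // exact sum: 4000000000
  int32_t folded = static_cast<int32_t>(wide);  // truncated fold
  int32_t wrapped = static_cast<int32_t>(
      static_cast<uint32_t>(a) + static_cast<uint32_t>(b));  // 32-bit add
  std::cout << (folded == wrapped) << '\n';     // prints 1
}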
@@ -1104,12 +1105,13 @@
} else {
// Last value: a * f ^ m + b or a * f ^ -m + b.
HInstruction* e = nullptr;
+ ArenaAllocator* allocator = graph->GetAllocator();
if (info->operation == HInductionVarAnalysis::kMul) {
- e = new (graph->GetArena()) HMul(type, opa, graph->GetConstant(type, fpow));
+ e = new (allocator) HMul(type, opa, graph->GetConstant(type, fpow));
} else {
- e = new (graph->GetArena()) HDiv(type, opa, graph->GetConstant(type, fpow), kNoDexPc);
+ e = new (allocator) HDiv(type, opa, graph->GetConstant(type, fpow), kNoDexPc);
}
- *result = Insert(block, new (graph->GetArena()) HAdd(type, Insert(block, e), opb));
+ *result = Insert(block, new (allocator) HAdd(type, Insert(block, e), opb));
}
}
return true;
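// Editor's sketch (standalone, not ART code): the closed form materialized
// above for a geometric induction. After m iterations of k = k * f starting
// from a, the last value is a * f^m + b; 'fpow' plays the role of the
// precomputed f^m constant, and the HMul/HAdd pair evaluates the formula.
#include <cstdint>
#include <iostream>

int64_t LastValueMul(int64_t a, int64_t f, int64_t m, int64_t b) {
  int64_t fpow = 1;
  for (int64_t i = 0; i < m; ++i) fpow *= f;  // f^m
  return a * fpow + b;                        // HAdd(HMul(a, f^m), b)
}

int main() {
  int64_t k = 3;                                         // a = 3, f = 2
  for (int i = 0; i < 4; ++i) k *= 2;                    // m = 4 iterations
  std::cout << (k == LastValueMul(3, 2, 4, 0)) << '\n';  // prints 1
}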
@@ -1190,18 +1192,20 @@
// During actual code generation (graph != nullptr), generate is_even ? x : y.
if (graph != nullptr) {
DataType::Type type = trip->type;
+ ArenaAllocator* allocator = graph->GetAllocator();
HInstruction* msk =
- Insert(block, new (graph->GetArena()) HAnd(type, t, graph->GetConstant(type, 1)));
+ Insert(block, new (allocator) HAnd(type, t, graph->GetConstant(type, 1)));
HInstruction* is_even =
- Insert(block, new (graph->GetArena()) HEqual(msk, graph->GetConstant(type, 0), kNoDexPc));
- *result = Insert(block, new (graph->GetArena()) HSelect(is_even, x, y, kNoDexPc));
+ Insert(block, new (allocator) HEqual(msk, graph->GetConstant(type, 0), kNoDexPc));
+ *result = Insert(block, new (allocator) HSelect(is_even, x, y, kNoDexPc));
}
// Guard select with taken test if needed.
if (*needs_taken_test) {
HInstruction* is_taken = nullptr;
if (GenerateCode(trip->op_b, nullptr, graph, block, graph ? &is_taken : nullptr, false, false)) {
if (graph != nullptr) {
- *result = Insert(block, new (graph->GetArena()) HSelect(is_taken, *result, x, kNoDexPc));
+ ArenaAllocator* allocator = graph->GetAllocator();
+ *result = Insert(block, new (allocator) HSelect(is_taken, *result, x, kNoDexPc));
}
*needs_taken_test = false; // taken care of
} else {
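// Editor's sketch (standalone, not ART code): the value computed by the
// HAnd/HEqual/HSelect chain generated above for a two-valued periodic
// induction, including the optional taken-test guard.
#include <cstdint>
#include <iostream>

int32_t PeriodicLastValue(int32_t t, int32_t x, int32_t y, bool is_taken) {
  int32_t msk = t & 1;               // HAnd(t, 1)
  bool is_even = (msk == 0);         // HEqual(msk, 0)
  int32_t result = is_even ? x : y;  // HSelect(is_even, x, y)
  return is_taken ? result : x;      // HSelect(is_taken, result, x)
}

int main() {
  std::cout << PeriodicLastValue(4, 7, 9, true) << '\n';  // even trip: 7
  std::cout << PeriodicLastValue(5, 7, 9, true) << '\n';  // odd trip: 9
}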
@@ -1250,25 +1254,25 @@
HInstruction* operation = nullptr;
switch (info->operation) {
case HInductionVarAnalysis::kAdd:
- operation = new (graph->GetArena()) HAdd(type, opa, opb); break;
+ operation = new (graph->GetAllocator()) HAdd(type, opa, opb); break;
case HInductionVarAnalysis::kSub:
- operation = new (graph->GetArena()) HSub(type, opa, opb); break;
+ operation = new (graph->GetAllocator()) HSub(type, opa, opb); break;
case HInductionVarAnalysis::kMul:
- operation = new (graph->GetArena()) HMul(type, opa, opb, kNoDexPc); break;
+ operation = new (graph->GetAllocator()) HMul(type, opa, opb, kNoDexPc); break;
case HInductionVarAnalysis::kDiv:
- operation = new (graph->GetArena()) HDiv(type, opa, opb, kNoDexPc); break;
+ operation = new (graph->GetAllocator()) HDiv(type, opa, opb, kNoDexPc); break;
case HInductionVarAnalysis::kRem:
- operation = new (graph->GetArena()) HRem(type, opa, opb, kNoDexPc); break;
+ operation = new (graph->GetAllocator()) HRem(type, opa, opb, kNoDexPc); break;
case HInductionVarAnalysis::kXor:
- operation = new (graph->GetArena()) HXor(type, opa, opb); break;
+ operation = new (graph->GetAllocator()) HXor(type, opa, opb); break;
case HInductionVarAnalysis::kLT:
- operation = new (graph->GetArena()) HLessThan(opa, opb); break;
+ operation = new (graph->GetAllocator()) HLessThan(opa, opb); break;
case HInductionVarAnalysis::kLE:
- operation = new (graph->GetArena()) HLessThanOrEqual(opa, opb); break;
+ operation = new (graph->GetAllocator()) HLessThanOrEqual(opa, opb); break;
case HInductionVarAnalysis::kGT:
- operation = new (graph->GetArena()) HGreaterThan(opa, opb); break;
+ operation = new (graph->GetAllocator()) HGreaterThan(opa, opb); break;
case HInductionVarAnalysis::kGE:
- operation = new (graph->GetArena()) HGreaterThanOrEqual(opa, opb); break;
+ operation = new (graph->GetAllocator()) HGreaterThanOrEqual(opa, opb); break;
default:
LOG(FATAL) << "unknown operation";
}
@@ -1280,7 +1284,7 @@
case HInductionVarAnalysis::kNeg:
if (GenerateCode(info->op_b, trip, graph, block, &opb, in_body, !is_min)) {
if (graph != nullptr) {
- *result = Insert(block, new (graph->GetArena()) HNeg(type, opb));
+ *result = Insert(block, new (graph->GetAllocator()) HNeg(type, opb));
}
return true;
}
@@ -1306,9 +1310,9 @@
} else if (in_body) {
if (GenerateCode(info->op_a, trip, graph, block, &opb, in_body, is_min)) {
if (graph != nullptr) {
+ ArenaAllocator* allocator = graph->GetAllocator();
*result =
- Insert(block,
- new (graph->GetArena()) HSub(type, opb, graph->GetConstant(type, 1)));
+ Insert(block, new (allocator) HSub(type, opb, graph->GetConstant(type, 1)));
}
return true;
}
@@ -1333,15 +1337,16 @@
if (GenerateCode(trip, trip, graph, block, &opa, in_body, is_min_a) &&
GenerateCode(info->op_b, trip, graph, block, &opb, in_body, is_min)) {
if (graph != nullptr) {
+ ArenaAllocator* allocator = graph->GetAllocator();
HInstruction* oper;
if (stride_value == 1) {
- oper = new (graph->GetArena()) HAdd(type, opa, opb);
+ oper = new (allocator) HAdd(type, opa, opb);
} else if (stride_value == -1) {
- oper = new (graph->GetArena()) HSub(type, opb, opa);
+ oper = new (allocator) HSub(type, opb, opa);
} else {
HInstruction* mul =
- new (graph->GetArena()) HMul(type, graph->GetConstant(type, stride_value), opa);
- oper = new (graph->GetArena()) HAdd(type, Insert(block, mul), opb);
+ new (allocator) HMul(type, graph->GetConstant(type, stride_value), opa);
+ oper = new (allocator) HAdd(type, Insert(block, mul), opb);
}
*result = Insert(block, oper);
}
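// Editor's sketch (standalone, not ART code): the three shapes chosen above
// for the linear value stride * i + b, avoiding a multiply for the two
// common strides.
#include <cstdint>

int32_t LinearExpr(int32_t stride_value, int32_t opa, int32_t opb) {
  if (stride_value == 1) return opa + opb;   // HAdd(opa, opb)
  if (stride_value == -1) return opb - opa;  // HSub(opb, opa)
  return stride_value * opa + opb;           // HAdd(HMul(stride, opa), opb)
}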
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index 1c84269..e5bc6ef 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -29,13 +29,11 @@
/**
* Fixture class for the InductionVarRange tests.
*/
-class InductionVarRangeTest : public CommonCompilerTest {
+class InductionVarRangeTest : public OptimizingUnitTest {
public:
InductionVarRangeTest()
- : pool_(),
- allocator_(&pool_),
- graph_(CreateGraph(&allocator_)),
- iva_(new (&allocator_) HInductionVarAnalysis(graph_)),
+ : graph_(CreateGraph()),
+ iva_(new (GetAllocator()) HInductionVarAnalysis(graph_)),
range_(iva_) {
BuildGraph();
}
@@ -61,22 +59,22 @@
/** Constructs bare minimum graph. */
void BuildGraph() {
graph_->SetNumberOfVRegs(1);
- entry_block_ = new (&allocator_) HBasicBlock(graph_);
- exit_block_ = new (&allocator_) HBasicBlock(graph_);
+ entry_block_ = new (GetAllocator()) HBasicBlock(graph_);
+ exit_block_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry_block_);
graph_->AddBlock(exit_block_);
graph_->SetEntryBlock(entry_block_);
graph_->SetExitBlock(exit_block_);
// Two parameters.
- x_ = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kInt32);
+ x_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kInt32);
entry_block_->AddInstruction(x_);
- y_ = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kInt32);
+ y_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kInt32);
entry_block_->AddInstruction(y_);
// Set arbitrary range analysis hint while testing private methods.
SetHint(x_);
@@ -85,13 +83,13 @@
/** Constructs loop with given upper bound. */
void BuildLoop(int32_t lower, HInstruction* upper, int32_t stride) {
// Control flow.
- loop_preheader_ = new (&allocator_) HBasicBlock(graph_);
+ loop_preheader_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(loop_preheader_);
- loop_header_ = new (&allocator_) HBasicBlock(graph_);
+ loop_header_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(loop_header_);
- loop_body_ = new (&allocator_) HBasicBlock(graph_);
+ loop_body_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(loop_body_);
- HBasicBlock* return_block = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* return_block = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(return_block);
entry_block_->AddSuccessor(loop_preheader_);
loop_preheader_->AddSuccessor(loop_header_);
@@ -100,24 +98,24 @@
loop_body_->AddSuccessor(loop_header_);
return_block->AddSuccessor(exit_block_);
// Instructions.
- loop_preheader_->AddInstruction(new (&allocator_) HGoto());
- HPhi* phi = new (&allocator_) HPhi(&allocator_, 0, 0, DataType::Type::kInt32);
+ loop_preheader_->AddInstruction(new (GetAllocator()) HGoto());
+ HPhi* phi = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
loop_header_->AddPhi(phi);
phi->AddInput(graph_->GetIntConstant(lower)); // i = l
if (stride > 0) {
- condition_ = new (&allocator_) HLessThan(phi, upper); // i < u
+ condition_ = new (GetAllocator()) HLessThan(phi, upper); // i < u
} else {
- condition_ = new (&allocator_) HGreaterThan(phi, upper); // i > u
+ condition_ = new (GetAllocator()) HGreaterThan(phi, upper); // i > u
}
loop_header_->AddInstruction(condition_);
- loop_header_->AddInstruction(new (&allocator_) HIf(condition_));
+ loop_header_->AddInstruction(new (GetAllocator()) HIf(condition_));
increment_ =
- new (&allocator_) HAdd(DataType::Type::kInt32, phi, graph_->GetIntConstant(stride));
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, phi, graph_->GetIntConstant(stride));
loop_body_->AddInstruction(increment_); // i += s
phi->AddInput(increment_);
- loop_body_->AddInstruction(new (&allocator_) HGoto());
- return_block->AddInstruction(new (&allocator_) HReturnVoid());
- exit_block_->AddInstruction(new (&allocator_) HExit());
+ loop_body_->AddInstruction(new (GetAllocator()) HGoto());
+ return_block->AddInstruction(new (GetAllocator()) HReturnVoid());
+ exit_block_->AddInstruction(new (GetAllocator()) HExit());
}
/** Constructs SSA and performs induction variable analysis. */
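// Editor's sketch (illustrative only): the source-level loop that
// BuildLoop(lower, upper, stride) assembles out of basic blocks; the header
// condition is HLessThan for positive strides and HGreaterThan otherwise.
void LoopShape(int lower, int upper, int stride) {
  if (stride > 0) {
    for (int i = lower; i < upper; i += stride) { /* loop_body_ */ }
  } else {
    for (int i = lower; i > upper; i += stride) { /* loop_body_ */ }
  }
}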
@@ -304,8 +302,6 @@
Value MaxValue(Value v1, Value v2) { return range_.MergeVal(v1, v2, false); }
// General building fields.
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
HBasicBlock* entry_block_;
HBasicBlock* exit_block_;
@@ -705,9 +701,9 @@
TEST_F(InductionVarRangeTest, ArrayLengthAndHints) {
// We pass a bogus constant for the class to avoid mocking one.
- HInstruction* new_array = new (&allocator_) HNewArray(x_, x_, 0);
+ HInstruction* new_array = new (GetAllocator()) HNewArray(x_, x_, 0);
entry_block_->AddInstruction(new_array);
- HInstruction* array_length = new (&allocator_) HArrayLength(new_array, 0);
+ HInstruction* array_length = new (GetAllocator()) HArrayLength(new_array, 0);
entry_block_->AddInstruction(array_length);
// With null hint: yields extreme constants.
const int32_t max_value = std::numeric_limits<int32_t>::max();
@@ -725,13 +721,13 @@
}
TEST_F(InductionVarRangeTest, AddOrSubAndConstant) {
- HInstruction* add = new (&allocator_)
+ HInstruction* add = new (GetAllocator())
HAdd(DataType::Type::kInt32, x_, graph_->GetIntConstant(-1));
- HInstruction* alt = new (&allocator_)
+ HInstruction* alt = new (GetAllocator())
HAdd(DataType::Type::kInt32, graph_->GetIntConstant(-1), x_);
- HInstruction* sub = new (&allocator_)
+ HInstruction* sub = new (GetAllocator())
HSub(DataType::Type::kInt32, x_, graph_->GetIntConstant(1));
- HInstruction* rev = new (&allocator_)
+ HInstruction* rev = new (GetAllocator())
HSub(DataType::Type::kInt32, graph_->GetIntConstant(1), x_);
entry_block_->AddInstruction(add);
entry_block_->AddInstruction(alt);
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 90e3d2a..4d846fa 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -705,7 +705,7 @@
uint32_t dex_pc) const {
ArtField* field = class_linker->GetClassRoot(ClassLinker::kJavaLangObject)->GetInstanceField(0);
DCHECK_EQ(std::string(field->GetName()), "shadow$_klass_");
- HInstanceFieldGet* result = new (graph_->GetArena()) HInstanceFieldGet(
+ HInstanceFieldGet* result = new (graph_->GetAllocator()) HInstanceFieldGet(
receiver,
field,
DataType::Type::kReference,
@@ -812,12 +812,12 @@
uint32_t dex_pc,
HInstruction* cursor,
HBasicBlock* bb_cursor) {
- HShouldDeoptimizeFlag* deopt_flag = new (graph_->GetArena())
- HShouldDeoptimizeFlag(graph_->GetArena(), dex_pc);
- HInstruction* compare = new (graph_->GetArena()) HNotEqual(
+ HShouldDeoptimizeFlag* deopt_flag = new (graph_->GetAllocator())
+ HShouldDeoptimizeFlag(graph_->GetAllocator(), dex_pc);
+ HInstruction* compare = new (graph_->GetAllocator()) HNotEqual(
deopt_flag, graph_->GetIntConstant(0, dex_pc));
- HInstruction* deopt = new (graph_->GetArena()) HDeoptimize(
- graph_->GetArena(), compare, DeoptimizationKind::kCHA, dex_pc);
+ HInstruction* deopt = new (graph_->GetAllocator()) HDeoptimize(
+ graph_->GetAllocator(), compare, DeoptimizationKind::kCHA, dex_pc);
if (cursor != nullptr) {
bb_cursor->InsertInstructionAfter(deopt_flag, cursor);
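// Editor's sketch (assumed semantics, not ART code): the runtime check the
// CHA guard above encodes; 'should_deoptimize' stands in for the value read
// by HShouldDeoptimizeFlag.
#include <cstdint>

void ChaGuardSketch(int32_t should_deoptimize) {
  if (should_deoptimize != 0) {  // HNotEqual(deopt_flag, 0)
    // HDeoptimize(kCHA): bail out to the interpreter; the devirtualized
    // code below is no longer valid.
    return;
  }
  // ... inlined code, valid only while the CHA assumption holds ...
}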
@@ -865,13 +865,13 @@
// Note that we will just compare the classes, so we don't need Java semantics access checks.
// Note that the type index and the dex file are relative to the method this type guard is
// inlined into.
- HLoadClass* load_class = new (graph_->GetArena()) HLoadClass(graph_->GetCurrentMethod(),
- class_index,
- caller_dex_file,
- klass,
- is_referrer,
- invoke_instruction->GetDexPc(),
- /* needs_access_check */ false);
+ HLoadClass* load_class = new (graph_->GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
+ class_index,
+ caller_dex_file,
+ klass,
+ is_referrer,
+ invoke_instruction->GetDexPc(),
+ /* needs_access_check */ false);
HLoadClass::LoadKind kind = HSharpening::ComputeLoadClassKind(
load_class, codegen_, compiler_driver_, caller_compilation_unit_);
DCHECK(kind != HLoadClass::LoadKind::kInvalid)
@@ -887,11 +887,11 @@
load_class->CopyEnvironmentFrom(invoke_instruction->GetEnvironment());
}
- HNotEqual* compare = new (graph_->GetArena()) HNotEqual(load_class, receiver_class);
+ HNotEqual* compare = new (graph_->GetAllocator()) HNotEqual(load_class, receiver_class);
bb_cursor->InsertInstructionAfter(compare, load_class);
if (with_deoptimization) {
- HDeoptimize* deoptimize = new (graph_->GetArena()) HDeoptimize(
- graph_->GetArena(),
+ HDeoptimize* deoptimize = new (graph_->GetAllocator()) HDeoptimize(
+ graph_->GetAllocator(),
compare,
receiver,
Runtime::Current()->IsAotCompiler()
@@ -1012,7 +1012,7 @@
uint32_t dex_pc = invoke_instruction->GetDexPc();
HBasicBlock* cursor_block = compare->GetBlock();
HBasicBlock* original_invoke_block = invoke_instruction->GetBlock();
- ArenaAllocator* allocator = graph_->GetArena();
+ ArenaAllocator* allocator = graph_->GetAllocator();
// Split the block after the compare: `cursor_block` will now be the start of the diamond,
// and the returned block is the start of the then branch (that could contain multiple blocks).
@@ -1147,7 +1147,7 @@
DataType::Type type = Is64BitInstructionSet(graph_->GetInstructionSet())
? DataType::Type::kInt64
: DataType::Type::kInt32;
- HClassTableGet* class_table_get = new (graph_->GetArena()) HClassTableGet(
+ HClassTableGet* class_table_get = new (graph_->GetAllocator()) HClassTableGet(
receiver_class,
type,
invoke_instruction->IsInvokeVirtual() ? HClassTableGet::TableKind::kVTable
@@ -1164,7 +1164,7 @@
reinterpret_cast<intptr_t>(actual_method), invoke_instruction->GetDexPc());
}
- HNotEqual* compare = new (graph_->GetArena()) HNotEqual(class_table_get, constant);
+ HNotEqual* compare = new (graph_->GetAllocator()) HNotEqual(class_table_get, constant);
if (cursor != nullptr) {
bb_cursor->InsertInstructionAfter(receiver_class, cursor);
} else {
@@ -1176,8 +1176,8 @@
if (outermost_graph_->IsCompilingOsr()) {
CreateDiamondPatternForPolymorphicInline(compare, return_replacement, invoke_instruction);
} else {
- HDeoptimize* deoptimize = new (graph_->GetArena()) HDeoptimize(
- graph_->GetArena(),
+ HDeoptimize* deoptimize = new (graph_->GetAllocator()) HDeoptimize(
+ graph_->GetAllocator(),
compare,
receiver,
DeoptimizationKind::kJitSameTarget,
@@ -1240,8 +1240,8 @@
if (dex_method_index == dex::kDexNoIndex) {
return false;
}
- HInvokeVirtual* new_invoke = new (graph_->GetArena()) HInvokeVirtual(
- graph_->GetArena(),
+ HInvokeVirtual* new_invoke = new (graph_->GetAllocator()) HInvokeVirtual(
+ graph_->GetAllocator(),
invoke_instruction->GetNumberOfArguments(),
invoke_instruction->GetType(),
invoke_instruction->GetDexPc(),
@@ -1517,7 +1517,7 @@
DCHECK(obj != nullptr) << "only non-static methods can have a constructor fence";
HConstructorFence* constructor_fence =
- new (graph_->GetArena()) HConstructorFence(obj, kNoDexPc, graph_->GetArena());
+ new (graph_->GetAllocator()) HConstructorFence(obj, kNoDexPc, graph_->GetAllocator());
invoke_instruction->GetBlock()->InsertInstructionBefore(constructor_fence,
invoke_instruction);
}
@@ -1539,7 +1539,7 @@
ArtField* resolved_field =
class_linker->LookupResolvedField(field_index, referrer, /* is_static */ false);
DCHECK(resolved_field != nullptr);
- HInstanceFieldGet* iget = new (graph_->GetArena()) HInstanceFieldGet(
+ HInstanceFieldGet* iget = new (graph_->GetAllocator()) HInstanceFieldGet(
obj,
resolved_field,
DataType::FromShorty(resolved_field->GetTypeDescriptor()[0]),
@@ -1579,7 +1579,7 @@
DCHECK(referrer->IsConstructor());
*is_final = resolved_field->IsFinal();
}
- HInstanceFieldSet* iput = new (graph_->GetArena()) HInstanceFieldSet(
+ HInstanceFieldSet* iput = new (graph_->GetAllocator()) HInstanceFieldSet(
obj,
value,
resolved_field,
@@ -1641,8 +1641,9 @@
}
const int32_t caller_instruction_counter = graph_->GetCurrentInstructionId();
- HGraph* callee_graph = new (graph_->GetArena()) HGraph(
- graph_->GetArena(),
+ HGraph* callee_graph = new (graph_->GetAllocator()) HGraph(
+ graph_->GetAllocator(),
+ graph_->GetArenaStack(),
callee_dex_file,
method_index,
compiler_driver_->GetInstructionSet(),
@@ -1659,7 +1660,7 @@
if (stats_ != nullptr) {
// Reuse one object for all inline attempts from this caller to keep Arena memory usage low.
if (inline_stats_ == nullptr) {
- void* storage = graph_->GetArena()->Alloc<OptimizingCompilerStats>(kArenaAllocMisc);
+ void* storage = graph_->GetAllocator()->Alloc<OptimizingCompilerStats>(kArenaAllocMisc);
inline_stats_ = new (storage) OptimizingCompilerStats;
} else {
inline_stats_->Reset();
@@ -1672,7 +1673,6 @@
codegen_,
inline_stats_,
resolved_method->GetQuickenedInfo(class_linker->GetImagePointerSize()),
- dex_cache,
handles_);
if (builder.BuildGraph() != kAnalysisSuccess) {
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 6ad8036..b06d91c 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -59,8 +59,8 @@
// the first throwing instruction.
HInstruction* current_local_value = (*current_locals_)[i];
if (current_local_value != nullptr) {
- HPhi* phi = new (arena_) HPhi(
- arena_,
+ HPhi* phi = new (allocator_) HPhi(
+ allocator_,
i,
0,
current_local_value->GetType());
@@ -109,8 +109,8 @@
HInstruction* incoming =
ValueOfLocalAt(current_block_->GetLoopInformation()->GetPreHeader(), local);
if (incoming != nullptr) {
- HPhi* phi = new (arena_) HPhi(
- arena_,
+ HPhi* phi = new (allocator_) HPhi(
+ allocator_,
local,
0,
incoming->GetType());
@@ -148,8 +148,8 @@
if (is_different) {
HInstruction* first_input = ValueOfLocalAt(current_block_->GetPredecessors()[0], local);
- HPhi* phi = new (arena_) HPhi(
- arena_,
+ HPhi* phi = new (allocator_) HPhi(
+ allocator_,
local,
current_block_->GetPredecessors().size(),
first_input->GetType());
@@ -210,8 +210,8 @@
void HInstructionBuilder::InitializeInstruction(HInstruction* instruction) {
if (instruction->NeedsEnvironment()) {
- HEnvironment* environment = new (arena_) HEnvironment(
- arena_,
+ HEnvironment* environment = new (allocator_) HEnvironment(
+ allocator_,
current_locals_->size(),
graph_->GetArtMethod(),
instruction->GetDexPc(),
@@ -227,7 +227,7 @@
return ref;
}
- HNullCheck* null_check = new (arena_) HNullCheck(ref, dex_pc);
+ HNullCheck* null_check = new (allocator_) HNullCheck(ref, dex_pc);
AppendInstruction(null_check);
return null_check;
}
@@ -265,7 +265,7 @@
bool HInstructionBuilder::Build() {
locals_for_.resize(graph_->GetBlocks().size(),
- ArenaVector<HInstruction*>(arena_->Adapter(kArenaAllocGraphBuilder)));
+ ArenaVector<HInstruction*>(allocator_->Adapter(kArenaAllocGraphBuilder)));
// Find locations where we want to generate extra stackmaps for native debugging.
// This allows us to generate the info only at interesting points (for example,
@@ -275,7 +275,8 @@
ArenaBitVector* native_debug_info_locations = nullptr;
if (native_debuggable) {
const uint32_t num_instructions = code_item_.insns_size_in_code_units_;
- native_debug_info_locations = new (arena_) ArenaBitVector (arena_, num_instructions, false);
+ native_debug_info_locations =
+ new (allocator_) ArenaBitVector(allocator_, num_instructions, false);
FindNativeDebugInfoLocations(native_debug_info_locations);
}
@@ -287,14 +288,14 @@
if (current_block_->IsEntryBlock()) {
InitializeParameters();
- AppendInstruction(new (arena_) HSuspendCheck(0u));
- AppendInstruction(new (arena_) HGoto(0u));
+ AppendInstruction(new (allocator_) HSuspendCheck(0u));
+ AppendInstruction(new (allocator_) HGoto(0u));
continue;
} else if (current_block_->IsExitBlock()) {
- AppendInstruction(new (arena_) HExit());
+ AppendInstruction(new (allocator_) HExit());
continue;
} else if (current_block_->IsLoopHeader()) {
- HSuspendCheck* suspend_check = new (arena_) HSuspendCheck(current_block_->GetDexPc());
+ HSuspendCheck* suspend_check = new (allocator_) HSuspendCheck(current_block_->GetDexPc());
current_block_->GetLoopInformation()->SetSuspendCheck(suspend_check);
// This is slightly odd because the loop header might not be empty (TryBoundary).
// But we're still creating the environment with locals from the top of the block.
@@ -331,7 +332,7 @@
}
if (native_debuggable && native_debug_info_locations->IsBitSet(dex_pc)) {
- AppendInstruction(new (arena_) HNativeDebugInfo(dex_pc));
+ AppendInstruction(new (allocator_) HNativeDebugInfo(dex_pc));
}
if (!ProcessDexInstruction(it.CurrentInstruction(), dex_pc, quicken_index)) {
@@ -348,7 +349,7 @@
// instruction of the current block is not a branching instruction.
// We add an unconditional Goto to the next block.
DCHECK_EQ(current_block_->GetSuccessors().size(), 1u);
- AppendInstruction(new (arena_) HGoto());
+ AppendInstruction(new (allocator_) HGoto());
}
}
@@ -452,7 +453,7 @@
dex_file_->GetMethodId(dex_compilation_unit_->GetDexMethodIndex());
if (!dex_compilation_unit_->IsStatic()) {
// Add the implicit 'this' argument, not expressed in the signature.
- HParameterValue* parameter = new (arena_) HParameterValue(*dex_file_,
+ HParameterValue* parameter = new (allocator_) HParameterValue(*dex_file_,
referrer_method_id.class_idx_,
parameter_index++,
DataType::Type::kReference,
@@ -468,7 +469,7 @@
const DexFile::ProtoId& proto = dex_file_->GetMethodPrototype(referrer_method_id);
const DexFile::TypeList* arg_types = dex_file_->GetProtoParameters(proto);
for (int i = 0, shorty_pos = 1; i < number_of_parameters; i++) {
- HParameterValue* parameter = new (arena_) HParameterValue(
+ HParameterValue* parameter = new (allocator_) HParameterValue(
*dex_file_,
arg_types->GetTypeItem(shorty_pos - 1).type_idx_,
parameter_index++,
@@ -491,18 +492,18 @@
void HInstructionBuilder::If_22t(const Instruction& instruction, uint32_t dex_pc) {
HInstruction* first = LoadLocal(instruction.VRegA(), DataType::Type::kInt32);
HInstruction* second = LoadLocal(instruction.VRegB(), DataType::Type::kInt32);
- T* comparison = new (arena_) T(first, second, dex_pc);
+ T* comparison = new (allocator_) T(first, second, dex_pc);
AppendInstruction(comparison);
- AppendInstruction(new (arena_) HIf(comparison, dex_pc));
+ AppendInstruction(new (allocator_) HIf(comparison, dex_pc));
current_block_ = nullptr;
}
template<typename T>
void HInstructionBuilder::If_21t(const Instruction& instruction, uint32_t dex_pc) {
HInstruction* value = LoadLocal(instruction.VRegA(), DataType::Type::kInt32);
- T* comparison = new (arena_) T(value, graph_->GetIntConstant(0, dex_pc), dex_pc);
+ T* comparison = new (allocator_) T(value, graph_->GetIntConstant(0, dex_pc), dex_pc);
AppendInstruction(comparison);
- AppendInstruction(new (arena_) HIf(comparison, dex_pc));
+ AppendInstruction(new (allocator_) HIf(comparison, dex_pc));
current_block_ = nullptr;
}
@@ -511,7 +512,7 @@
DataType::Type type,
uint32_t dex_pc) {
HInstruction* first = LoadLocal(instruction.VRegB(), type);
- AppendInstruction(new (arena_) T(type, first, dex_pc));
+ AppendInstruction(new (allocator_) T(type, first, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -520,7 +521,7 @@
DataType::Type result_type,
uint32_t dex_pc) {
HInstruction* first = LoadLocal(instruction.VRegB(), input_type);
- AppendInstruction(new (arena_) HTypeConversion(result_type, first, dex_pc));
+ AppendInstruction(new (allocator_) HTypeConversion(result_type, first, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -530,7 +531,7 @@
uint32_t dex_pc) {
HInstruction* first = LoadLocal(instruction.VRegB(), type);
HInstruction* second = LoadLocal(instruction.VRegC(), type);
- AppendInstruction(new (arena_) T(type, first, second, dex_pc));
+ AppendInstruction(new (allocator_) T(type, first, second, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -540,7 +541,7 @@
uint32_t dex_pc) {
HInstruction* first = LoadLocal(instruction.VRegB(), type);
HInstruction* second = LoadLocal(instruction.VRegC(), DataType::Type::kInt32);
- AppendInstruction(new (arena_) T(type, first, second, dex_pc));
+ AppendInstruction(new (allocator_) T(type, first, second, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -550,7 +551,7 @@
uint32_t dex_pc) {
HInstruction* first = LoadLocal(instruction.VRegB(), type);
HInstruction* second = LoadLocal(instruction.VRegC(), type);
- AppendInstruction(new (arena_) HCompare(type, first, second, bias, dex_pc));
+ AppendInstruction(new (allocator_) HCompare(type, first, second, bias, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -560,7 +561,7 @@
uint32_t dex_pc) {
HInstruction* first = LoadLocal(instruction.VRegA(), type);
HInstruction* second = LoadLocal(instruction.VRegB(), DataType::Type::kInt32);
- AppendInstruction(new (arena_) T(type, first, second, dex_pc));
+ AppendInstruction(new (allocator_) T(type, first, second, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -570,7 +571,7 @@
uint32_t dex_pc) {
HInstruction* first = LoadLocal(instruction.VRegA(), type);
HInstruction* second = LoadLocal(instruction.VRegB(), type);
- AppendInstruction(new (arena_) T(type, first, second, dex_pc));
+ AppendInstruction(new (allocator_) T(type, first, second, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -581,7 +582,7 @@
if (reverse) {
std::swap(first, second);
}
- AppendInstruction(new (arena_) T(DataType::Type::kInt32, first, second, dex_pc));
+ AppendInstruction(new (allocator_) T(DataType::Type::kInt32, first, second, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -592,7 +593,7 @@
if (reverse) {
std::swap(first, second);
}
- AppendInstruction(new (arena_) T(DataType::Type::kInt32, first, second, dex_pc));
+ AppendInstruction(new (allocator_) T(DataType::Type::kInt32, first, second, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -630,13 +631,13 @@
if (table.GetNumEntries() == 0) {
// Empty Switch. Code falls through to the next block.
DCHECK(IsFallthroughInstruction(instruction, dex_pc, current_block_));
- AppendInstruction(new (arena_) HGoto(dex_pc));
+ AppendInstruction(new (allocator_) HGoto(dex_pc));
} else if (table.ShouldBuildDecisionTree()) {
for (DexSwitchTableIterator it(table); !it.Done(); it.Advance()) {
HInstruction* case_value = graph_->GetIntConstant(it.CurrentKey(), dex_pc);
- HEqual* comparison = new (arena_) HEqual(value, case_value, dex_pc);
+ HEqual* comparison = new (allocator_) HEqual(value, case_value, dex_pc);
AppendInstruction(comparison);
- AppendInstruction(new (arena_) HIf(comparison, dex_pc));
+ AppendInstruction(new (allocator_) HIf(comparison, dex_pc));
if (!it.IsLast()) {
current_block_ = FindBlockStartingAt(it.GetDexPcForCurrentIndex());
@@ -644,7 +645,7 @@
}
} else {
AppendInstruction(
- new (arena_) HPackedSwitch(table.GetEntryAt(0), table.GetNumEntries(), value, dex_pc));
+ new (allocator_) HPackedSwitch(table.GetEntryAt(0), table.GetNumEntries(), value, dex_pc));
}
current_block_ = nullptr;
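// Editor's sketch (illustrative only): the two lowerings selected above.
// A decision tree becomes an HEqual + HIf chain, one comparison per key;
// a dense table becomes a single HPackedSwitch starting at the lowest key.
#include <cstdint>

void SwitchSketch(int32_t value) {
  if (value == 1) { /* case 1 */ }         // HEqual + HIf
  else if (value == 10) { /* case 10 */ }  // HEqual + HIf
  else { /* fall through */ }
}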
@@ -664,16 +665,16 @@
HInstruction* fence_target = current_this_parameter_;
DCHECK(fence_target != nullptr);
- AppendInstruction(new (arena_) HConstructorFence(fence_target, dex_pc, arena_));
+ AppendInstruction(new (allocator_) HConstructorFence(fence_target, dex_pc, allocator_));
MaybeRecordStat(
compilation_stats_,
MethodCompilationStat::kConstructorFenceGeneratedFinal);
}
- AppendInstruction(new (arena_) HReturnVoid(dex_pc));
+ AppendInstruction(new (allocator_) HReturnVoid(dex_pc));
} else {
DCHECK(!RequiresConstructorBarrier(dex_compilation_unit_, compiler_driver_));
HInstruction* value = LoadLocal(instruction.VRegA(), type);
- AppendInstruction(new (arena_) HReturn(value, dex_pc));
+ AppendInstruction(new (allocator_) HReturn(value, dex_pc));
}
current_block_ = nullptr;
}
@@ -816,12 +817,12 @@
if (UNLIKELY(resolved_method == nullptr)) {
MaybeRecordStat(compilation_stats_,
MethodCompilationStat::kUnresolvedMethod);
- HInvoke* invoke = new (arena_) HInvokeUnresolved(arena_,
- number_of_arguments,
- return_type,
- dex_pc,
- method_idx,
- invoke_type);
+ HInvoke* invoke = new (allocator_) HInvokeUnresolved(allocator_,
+ number_of_arguments,
+ return_type,
+ dex_pc,
+ method_idx,
+ invoke_type);
return HandleInvoke(invoke,
number_of_vreg_arguments,
args,
@@ -841,8 +842,8 @@
dchecked_integral_cast<uint64_t>(string_init_entry_point)
};
MethodReference target_method(dex_file_, method_idx);
- HInvoke* invoke = new (arena_) HInvokeStaticOrDirect(
- arena_,
+ HInvoke* invoke = new (allocator_) HInvokeStaticOrDirect(
+ allocator_,
number_of_arguments - 1,
+ DataType::Type::kReference /* return_type */,
dex_pc,
@@ -887,35 +888,35 @@
};
MethodReference target_method(resolved_method->GetDexFile(),
resolved_method->GetDexMethodIndex());
- invoke = new (arena_) HInvokeStaticOrDirect(arena_,
- number_of_arguments,
- return_type,
- dex_pc,
- method_idx,
- resolved_method,
- dispatch_info,
- invoke_type,
- target_method,
- clinit_check_requirement);
+ invoke = new (allocator_) HInvokeStaticOrDirect(allocator_,
+ number_of_arguments,
+ return_type,
+ dex_pc,
+ method_idx,
+ resolved_method,
+ dispatch_info,
+ invoke_type,
+ target_method,
+ clinit_check_requirement);
} else if (invoke_type == kVirtual) {
ScopedObjectAccess soa(Thread::Current()); // Needed for the method index
- invoke = new (arena_) HInvokeVirtual(arena_,
- number_of_arguments,
- return_type,
- dex_pc,
- method_idx,
- resolved_method,
- resolved_method->GetMethodIndex());
+ invoke = new (allocator_) HInvokeVirtual(allocator_,
+ number_of_arguments,
+ return_type,
+ dex_pc,
+ method_idx,
+ resolved_method,
+ resolved_method->GetMethodIndex());
} else {
DCHECK_EQ(invoke_type, kInterface);
ScopedObjectAccess soa(Thread::Current()); // Needed for the IMT index.
- invoke = new (arena_) HInvokeInterface(arena_,
- number_of_arguments,
- return_type,
- dex_pc,
- method_idx,
- resolved_method,
- ImTable::GetImtIndex(resolved_method));
+ invoke = new (allocator_) HInvokeInterface(allocator_,
+ number_of_arguments,
+ return_type,
+ dex_pc,
+ method_idx,
+ resolved_method,
+ ImTable::GetImtIndex(resolved_method));
}
return HandleInvoke(invoke,
@@ -940,11 +941,11 @@
DCHECK_EQ(1 + ArtMethod::NumArgRegisters(descriptor), number_of_vreg_arguments);
DataType::Type return_type = DataType::FromShorty(descriptor[0]);
size_t number_of_arguments = strlen(descriptor);
- HInvoke* invoke = new (arena_) HInvokePolymorphic(arena_,
- number_of_arguments,
- return_type,
- dex_pc,
- method_idx);
+ HInvoke* invoke = new (allocator_) HInvokePolymorphic(allocator_,
+ number_of_arguments,
+ return_type,
+ dex_pc,
+ method_idx);
return HandleInvoke(invoke,
number_of_vreg_arguments,
args,
@@ -964,7 +965,7 @@
Handle<mirror::Class> klass = load_class->GetClass();
if (!IsInitialized(klass)) {
- cls = new (arena_) HClinitCheck(load_class, dex_pc);
+ cls = new (allocator_) HClinitCheck(load_class, dex_pc);
AppendInstruction(cls);
}
@@ -979,7 +980,7 @@
// Consider classes we haven't resolved as potentially finalizable.
bool finalizable = (klass == nullptr) || klass->IsFinalizable();
- HNewInstance* new_instance = new (arena_) HNewInstance(
+ HNewInstance* new_instance = new (allocator_) HNewInstance(
cls,
dex_pc,
type_index,
@@ -1036,7 +1037,7 @@
// (and in theory the 0-initializing, but that happens automatically
// when new memory pages are mapped in by the OS).
HConstructorFence* ctor_fence =
- new (arena_) HConstructorFence(allocation, allocation->GetDexPc(), arena_);
+ new (allocator_) HConstructorFence(allocation, allocation->GetDexPc(), allocator_);
AppendInstruction(ctor_fence);
MaybeRecordStat(
compilation_stats_,
@@ -1090,7 +1091,7 @@
/* needs_access_check */ false);
if (cls != nullptr) {
*clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit;
- clinit_check = new (arena_) HClinitCheck(cls, dex_pc);
+ clinit_check = new (allocator_) HClinitCheck(cls, dex_pc);
AppendInstruction(clinit_check);
}
}
@@ -1290,23 +1291,23 @@
if (resolved_field == nullptr) {
MaybeRecordStat(compilation_stats_,
MethodCompilationStat::kUnresolvedField);
- field_set = new (arena_) HUnresolvedInstanceFieldSet(object,
- value,
- field_type,
- field_index,
- dex_pc);
+ field_set = new (allocator_) HUnresolvedInstanceFieldSet(object,
+ value,
+ field_type,
+ field_index,
+ dex_pc);
} else {
uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex();
- field_set = new (arena_) HInstanceFieldSet(object,
- value,
- resolved_field,
- field_type,
- resolved_field->GetOffset(),
- resolved_field->IsVolatile(),
- field_index,
- class_def_index,
- *dex_file_,
- dex_pc);
+ field_set = new (allocator_) HInstanceFieldSet(object,
+ value,
+ resolved_field,
+ field_type,
+ resolved_field->GetOffset(),
+ resolved_field->IsVolatile(),
+ field_index,
+ class_def_index,
+ *dex_file_,
+ dex_pc);
}
AppendInstruction(field_set);
} else {
@@ -1314,21 +1315,21 @@
if (resolved_field == nullptr) {
MaybeRecordStat(compilation_stats_,
MethodCompilationStat::kUnresolvedField);
- field_get = new (arena_) HUnresolvedInstanceFieldGet(object,
- field_type,
- field_index,
- dex_pc);
+ field_get = new (allocator_) HUnresolvedInstanceFieldGet(object,
+ field_type,
+ field_index,
+ dex_pc);
} else {
uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex();
- field_get = new (arena_) HInstanceFieldGet(object,
- resolved_field,
- field_type,
- resolved_field->GetOffset(),
- resolved_field->IsVolatile(),
- field_index,
- class_def_index,
- *dex_file_,
- dex_pc);
+ field_get = new (allocator_) HInstanceFieldGet(object,
+ resolved_field,
+ field_type,
+ resolved_field->GetOffset(),
+ resolved_field->IsVolatile(),
+ field_index,
+ class_def_index,
+ *dex_file_,
+ dex_pc);
}
AppendInstruction(field_get);
UpdateLocal(source_or_dest_reg, field_get);
@@ -1382,9 +1383,9 @@
if (is_put) {
HInstruction* value = LoadLocal(source_or_dest_reg, field_type);
AppendInstruction(
- new (arena_) HUnresolvedStaticFieldSet(value, field_type, field_index, dex_pc));
+ new (allocator_) HUnresolvedStaticFieldSet(value, field_type, field_index, dex_pc));
} else {
- AppendInstruction(new (arena_) HUnresolvedStaticFieldGet(field_type, field_index, dex_pc));
+ AppendInstruction(new (allocator_) HUnresolvedStaticFieldGet(field_type, field_index, dex_pc));
UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
}
}
@@ -1475,7 +1476,7 @@
HInstruction* cls = constant;
if (!IsInitialized(klass)) {
- cls = new (arena_) HClinitCheck(constant, dex_pc);
+ cls = new (allocator_) HClinitCheck(constant, dex_pc);
AppendInstruction(cls);
}
@@ -1484,38 +1485,38 @@
// We need to keep the class alive before loading the value.
HInstruction* value = LoadLocal(source_or_dest_reg, field_type);
DCHECK_EQ(HPhi::ToPhiType(value->GetType()), HPhi::ToPhiType(field_type));
- AppendInstruction(new (arena_) HStaticFieldSet(cls,
- value,
- resolved_field,
- field_type,
- resolved_field->GetOffset(),
- resolved_field->IsVolatile(),
- field_index,
- class_def_index,
- *dex_file_,
- dex_pc));
+ AppendInstruction(new (allocator_) HStaticFieldSet(cls,
+ value,
+ resolved_field,
+ field_type,
+ resolved_field->GetOffset(),
+ resolved_field->IsVolatile(),
+ field_index,
+ class_def_index,
+ *dex_file_,
+ dex_pc));
} else {
- AppendInstruction(new (arena_) HStaticFieldGet(cls,
- resolved_field,
- field_type,
- resolved_field->GetOffset(),
- resolved_field->IsVolatile(),
- field_index,
- class_def_index,
- *dex_file_,
- dex_pc));
+ AppendInstruction(new (allocator_) HStaticFieldGet(cls,
+ resolved_field,
+ field_type,
+ resolved_field->GetOffset(),
+ resolved_field->IsVolatile(),
+ field_index,
+ class_def_index,
+ *dex_file_,
+ dex_pc));
UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
}
return true;
}
void HInstructionBuilder::BuildCheckedDivRem(uint16_t out_vreg,
- uint16_t first_vreg,
- int64_t second_vreg_or_constant,
- uint32_t dex_pc,
- DataType::Type type,
- bool second_is_constant,
- bool isDiv) {
+ uint16_t first_vreg,
+ int64_t second_vreg_or_constant,
+ uint32_t dex_pc,
+ DataType::Type type,
+ bool second_is_constant,
+ bool isDiv) {
DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64);
HInstruction* first = LoadLocal(first_vreg, type);
@@ -1533,14 +1534,14 @@
if (!second_is_constant
|| (type == DataType::Type::kInt32 && second->AsIntConstant()->GetValue() == 0)
|| (type == DataType::Type::kInt64 && second->AsLongConstant()->GetValue() == 0)) {
- second = new (arena_) HDivZeroCheck(second, dex_pc);
+ second = new (allocator_) HDivZeroCheck(second, dex_pc);
AppendInstruction(second);
}
if (isDiv) {
- AppendInstruction(new (arena_) HDiv(type, first, second, dex_pc));
+ AppendInstruction(new (allocator_) HDiv(type, first, second, dex_pc));
} else {
- AppendInstruction(new (arena_) HRem(type, first, second, dex_pc));
+ AppendInstruction(new (allocator_) HRem(type, first, second, dex_pc));
}
UpdateLocal(out_vreg, current_block_->GetLastInstruction());
}
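// Editor's sketch (standalone, not ART code): the Java-level behavior the
// HDivZeroCheck + HDiv pair emitted above preserves when the divisor is not
// a known non-zero constant. (Java's INT_MIN / -1 wrap case is ignored.)
#include <cstdint>
#include <stdexcept>

int32_t CheckedDiv(int32_t first, int32_t second) {
  if (second == 0) {  // HDivZeroCheck throws ArithmeticException
    throw std::runtime_error("ArithmeticException: divide by zero");
  }
  return first / second;  // HDiv
}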
@@ -1554,19 +1555,19 @@
uint8_t index_reg = instruction.VRegC_23x();
HInstruction* object = LoadNullCheckedLocal(array_reg, dex_pc);
- HInstruction* length = new (arena_) HArrayLength(object, dex_pc);
+ HInstruction* length = new (allocator_) HArrayLength(object, dex_pc);
AppendInstruction(length);
HInstruction* index = LoadLocal(index_reg, DataType::Type::kInt32);
- index = new (arena_) HBoundsCheck(index, length, dex_pc);
+ index = new (allocator_) HBoundsCheck(index, length, dex_pc);
AppendInstruction(index);
if (is_put) {
HInstruction* value = LoadLocal(source_or_dest_reg, anticipated_type);
// TODO: Insert a type check node if the type is Object.
- HArraySet* aset = new (arena_) HArraySet(object, index, value, anticipated_type, dex_pc);
+ HArraySet* aset = new (allocator_) HArraySet(object, index, value, anticipated_type, dex_pc);
ssa_builder_->MaybeAddAmbiguousArraySet(aset);
AppendInstruction(aset);
} else {
- HArrayGet* aget = new (arena_) HArrayGet(object, index, anticipated_type, dex_pc);
+ HArrayGet* aget = new (allocator_) HArrayGet(object, index, anticipated_type, dex_pc);
ssa_builder_->MaybeAddAmbiguousArrayGet(aget);
AppendInstruction(aget);
UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
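// Editor's sketch (standalone, not ART code): the check sequence the
// aget/aput lowering above materializes before touching the array.
#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <vector>

int32_t AgetSketch(const std::vector<int32_t>* object, size_t index) {
  if (object == nullptr) {         // LoadNullCheckedLocal
    throw std::runtime_error("NullPointerException");
  }
  size_t length = object->size();  // HArrayLength
  if (index >= length) {           // HBoundsCheck
    throw std::runtime_error("ArrayIndexOutOfBoundsException");
  }
  return (*object)[index];         // HArrayGet
}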
@@ -1582,7 +1583,7 @@
uint32_t register_index) {
HInstruction* length = graph_->GetIntConstant(number_of_vreg_arguments, dex_pc);
HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
- HNewArray* const object = new (arena_) HNewArray(cls, length, dex_pc);
+ HNewArray* const object = new (allocator_) HNewArray(cls, length, dex_pc);
AppendInstruction(object);
const char* descriptor = dex_file_->StringByTypeIdx(type_index);
@@ -1597,7 +1598,7 @@
for (size_t i = 0; i < number_of_vreg_arguments; ++i) {
HInstruction* value = LoadLocal(is_range ? register_index + i : args[i], type);
HInstruction* index = graph_->GetIntConstant(i, dex_pc);
- HArraySet* aset = new (arena_) HArraySet(object, index, value, type, dex_pc);
+ HArraySet* aset = new (allocator_) HArraySet(object, index, value, type, dex_pc);
ssa_builder_->MaybeAddAmbiguousArraySet(aset);
AppendInstruction(aset);
}
@@ -1615,7 +1616,7 @@
for (uint32_t i = 0; i < element_count; ++i) {
HInstruction* index = graph_->GetIntConstant(i, dex_pc);
HInstruction* value = graph_->GetIntConstant(data[i], dex_pc);
- HArraySet* aset = new (arena_) HArraySet(object, index, value, anticipated_type, dex_pc);
+ HArraySet* aset = new (allocator_) HArraySet(object, index, value, anticipated_type, dex_pc);
ssa_builder_->MaybeAddAmbiguousArraySet(aset);
AppendInstruction(aset);
}
@@ -1635,13 +1636,13 @@
return;
}
- HInstruction* length = new (arena_) HArrayLength(array, dex_pc);
+ HInstruction* length = new (allocator_) HArrayLength(array, dex_pc);
AppendInstruction(length);
// Implementation of this DEX instruction seems to be that the bounds check is
// done before doing any stores.
HInstruction* last_index = graph_->GetIntConstant(payload->element_count - 1, dex_pc);
- AppendInstruction(new (arena_) HBoundsCheck(last_index, length, dex_pc));
+ AppendInstruction(new (allocator_) HBoundsCheck(last_index, length, dex_pc));
switch (payload->element_width) {
case 1:
@@ -1684,7 +1685,8 @@
for (uint32_t i = 0; i < element_count; ++i) {
HInstruction* index = graph_->GetIntConstant(i, dex_pc);
HInstruction* value = graph_->GetLongConstant(data[i], dex_pc);
- HArraySet* aset = new (arena_) HArraySet(object, index, value, DataType::Type::kInt64, dex_pc);
+ HArraySet* aset =
+ new (allocator_) HArraySet(object, index, value, DataType::Type::kInt64, dex_pc);
ssa_builder_->MaybeAddAmbiguousArraySet(aset);
AppendInstruction(aset);
}
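// Editor's sketch (standalone, not ART code): fill-array-data bounds-checks
// only the last index, up front, so a too-short array throws before any
// element is written; assumes a non-empty payload, as the IR above does.
#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <vector>

void FillArrayDataSketch(std::vector<int32_t>& array,
                         const std::vector<int32_t>& payload) {
  size_t last_index = payload.size() - 1;  // element_count - 1
  if (last_index >= array.size()) {        // single HBoundsCheck
    throw std::runtime_error("ArrayIndexOutOfBoundsException");
  }
  for (size_t i = 0; i < payload.size(); ++i) {
    array[i] = payload[i];                 // unchecked HArraySet per element
  }
}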
@@ -1752,7 +1754,7 @@
}
// Note: `klass` must be from `handles_`.
- HLoadClass* load_class = new (arena_) HLoadClass(
+ HLoadClass* load_class = new (allocator_) HLoadClass(
graph_->GetCurrentMethod(),
type_index,
*actual_dex_file,
@@ -1787,15 +1789,15 @@
ScopedObjectAccess soa(Thread::Current());
TypeCheckKind check_kind = ComputeTypeCheckKind(cls->GetClass());
if (instruction.Opcode() == Instruction::INSTANCE_OF) {
- AppendInstruction(new (arena_) HInstanceOf(object, cls, check_kind, dex_pc));
+ AppendInstruction(new (allocator_) HInstanceOf(object, cls, check_kind, dex_pc));
UpdateLocal(destination, current_block_->GetLastInstruction());
} else {
DCHECK_EQ(instruction.Opcode(), Instruction::CHECK_CAST);
// We emit a CheckCast followed by a BoundType. CheckCast is a statement
// which may throw. If it succeeds BoundType sets the new type of `object`
// for all subsequent uses.
- AppendInstruction(new (arena_) HCheckCast(object, cls, check_kind, dex_pc));
- AppendInstruction(new (arena_) HBoundType(object, dex_pc));
+ AppendInstruction(new (allocator_) HCheckCast(object, cls, check_kind, dex_pc));
+ AppendInstruction(new (allocator_) HBoundType(object, dex_pc));
UpdateLocal(reference, current_block_->GetLastInstruction());
}
}
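As a rough analogue of the CheckCast/BoundType pair described above (illustrative C++, not ART code): the cast may throw, and on success later uses of the reference are typed with the narrower class.

    #include <typeinfo>

    struct Base { virtual ~Base() = default; };
    struct Derived : Base { void OnlyOnDerived() {} };

    void UseAfterCheckCast(Base* object) {
      Derived* bound = dynamic_cast<Derived*>(object);  // CheckCast: may fail
      if (bound == nullptr) throw std::bad_cast();      // ClassCastException analogue
      bound->OnlyOnDerived();                           // BoundType: narrowed type for later uses
    }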
@@ -1943,7 +1945,7 @@
case Instruction::GOTO:
case Instruction::GOTO_16:
case Instruction::GOTO_32: {
- AppendInstruction(new (arena_) HGoto(dex_pc));
+ AppendInstruction(new (allocator_) HGoto(dex_pc));
current_block_ = nullptr;
break;
}
@@ -2580,7 +2582,7 @@
HInstruction* length = LoadLocal(instruction.VRegB_22c(), DataType::Type::kInt32);
HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
- HNewArray* new_array = new (arena_) HNewArray(cls, length, dex_pc);
+ HNewArray* new_array = new (allocator_) HNewArray(cls, length, dex_pc);
AppendInstruction(new_array);
UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
BuildConstructorFenceForAllocation(new_array);
@@ -2744,23 +2746,27 @@
case Instruction::ARRAY_LENGTH: {
HInstruction* object = LoadNullCheckedLocal(instruction.VRegB_12x(), dex_pc);
- AppendInstruction(new (arena_) HArrayLength(object, dex_pc));
+ AppendInstruction(new (allocator_) HArrayLength(object, dex_pc));
UpdateLocal(instruction.VRegA_12x(), current_block_->GetLastInstruction());
break;
}
case Instruction::CONST_STRING: {
dex::StringIndex string_index(instruction.VRegB_21c());
- AppendInstruction(
- new (arena_) HLoadString(graph_->GetCurrentMethod(), string_index, *dex_file_, dex_pc));
+ AppendInstruction(new (allocator_) HLoadString(graph_->GetCurrentMethod(),
+ string_index,
+ *dex_file_,
+ dex_pc));
UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
break;
}
case Instruction::CONST_STRING_JUMBO: {
dex::StringIndex string_index(instruction.VRegB_31c());
- AppendInstruction(
- new (arena_) HLoadString(graph_->GetCurrentMethod(), string_index, *dex_file_, dex_pc));
+ AppendInstruction(new (allocator_) HLoadString(graph_->GetCurrentMethod(),
+ string_index,
+ *dex_file_,
+ dex_pc));
UpdateLocal(instruction.VRegA_31c(), current_block_->GetLastInstruction());
break;
}
@@ -2773,15 +2779,15 @@
}
case Instruction::MOVE_EXCEPTION: {
- AppendInstruction(new (arena_) HLoadException(dex_pc));
+ AppendInstruction(new (allocator_) HLoadException(dex_pc));
UpdateLocal(instruction.VRegA_11x(), current_block_->GetLastInstruction());
- AppendInstruction(new (arena_) HClearException(dex_pc));
+ AppendInstruction(new (allocator_) HClearException(dex_pc));
break;
}
case Instruction::THROW: {
HInstruction* exception = LoadLocal(instruction.VRegA_11x(), DataType::Type::kReference);
- AppendInstruction(new (arena_) HThrow(exception, dex_pc));
+ AppendInstruction(new (allocator_) HThrow(exception, dex_pc));
// We finished building this block. Set the current block to null to avoid
// adding dead instructions to it.
current_block_ = nullptr;
@@ -2804,7 +2810,7 @@
}
case Instruction::MONITOR_ENTER: {
- AppendInstruction(new (arena_) HMonitorOperation(
+ AppendInstruction(new (allocator_) HMonitorOperation(
LoadLocal(instruction.VRegA_11x(), DataType::Type::kReference),
HMonitorOperation::OperationKind::kEnter,
dex_pc));
@@ -2812,7 +2818,7 @@
}
case Instruction::MONITOR_EXIT: {
- AppendInstruction(new (arena_) HMonitorOperation(
+ AppendInstruction(new (allocator_) HMonitorOperation(
LoadLocal(instruction.VRegA_11x(), DataType::Type::kReference),
HMonitorOperation::OperationKind::kExit,
dex_pc));
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index a684bf4..79d6ddc 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -43,15 +43,15 @@
const DexFile* dex_file,
const DexFile::CodeItem& code_item,
DataType::Type return_type,
- DexCompilationUnit* dex_compilation_unit,
- const DexCompilationUnit* const outer_compilation_unit,
+ const DexCompilationUnit* dex_compilation_unit,
+ const DexCompilationUnit* outer_compilation_unit,
CompilerDriver* driver,
CodeGenerator* code_generator,
const uint8_t* interpreter_metadata,
OptimizingCompilerStats* compiler_stats,
Handle<mirror::DexCache> dex_cache,
VariableSizedHandleScope* handles)
- : arena_(graph->GetArena()),
+ : allocator_(graph->GetAllocator()),
graph_(graph),
handles_(handles),
dex_file_(dex_file),
@@ -59,7 +59,7 @@
return_type_(return_type),
block_builder_(block_builder),
ssa_builder_(ssa_builder),
- locals_for_(arena_->Adapter(kArenaAllocGraphBuilder)),
+ locals_for_(allocator_->Adapter(kArenaAllocGraphBuilder)),
current_block_(nullptr),
current_locals_(nullptr),
latest_result_(nullptr),
@@ -71,7 +71,7 @@
quicken_info_(interpreter_metadata),
compilation_stats_(compiler_stats),
dex_cache_(dex_cache),
- loop_headers_(graph->GetArena()->Adapter(kArenaAllocGraphBuilder)) {
+ loop_headers_(allocator_->Adapter(kArenaAllocGraphBuilder)) {
loop_headers_.reserve(kDefaultNumberOfLoops);
}
@@ -312,7 +312,7 @@
ObjPtr<mirror::Class> LookupReferrerClass() const REQUIRES_SHARED(Locks::mutator_lock_);
- ArenaAllocator* const arena_;
+ ArenaAllocator* const allocator_;
HGraph* const graph_;
VariableSizedHandleScope* handles_;
@@ -342,7 +342,7 @@
// The compilation unit of the current method being compiled. Note that
// it can be an inlined method.
- DexCompilationUnit* const dex_compilation_unit_;
+ const DexCompilationUnit* const dex_compilation_unit_;
// The compilation unit of the outermost method being compiled. That is the
// method being compiled (and not inlined), and potentially inlining other
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 6610bcc..f39acab 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -186,7 +186,7 @@
binop->ReplaceInput(right_neg->GetInput(), 1);
left_neg->GetBlock()->RemoveInstruction(left_neg);
right_neg->GetBlock()->RemoveInstruction(right_neg);
- HNeg* neg = new (GetGraph()->GetArena()) HNeg(binop->GetType(), binop);
+ HNeg* neg = new (GetGraph()->GetAllocator()) HNeg(binop->GetType(), binop);
binop->GetBlock()->InsertInstructionBefore(neg, binop->GetNext());
binop->ReplaceWithExceptInReplacementAtIndex(neg, 0);
RecordSimplification();
@@ -225,15 +225,15 @@
// Replace the `HAnd` or `HOr`.
HBinaryOperation* hbin;
if (op->IsAnd()) {
- hbin = new (GetGraph()->GetArena()) HOr(type, src_left, src_right, dex_pc);
+ hbin = new (GetGraph()->GetAllocator()) HOr(type, src_left, src_right, dex_pc);
} else {
- hbin = new (GetGraph()->GetArena()) HAnd(type, src_left, src_right, dex_pc);
+ hbin = new (GetGraph()->GetAllocator()) HAnd(type, src_left, src_right, dex_pc);
}
HInstruction* hnot;
if (left->IsBooleanNot()) {
- hnot = new (GetGraph()->GetArena()) HBooleanNot(hbin, dex_pc);
+ hnot = new (GetGraph()->GetAllocator()) HBooleanNot(hbin, dex_pc);
} else {
- hnot = new (GetGraph()->GetArena()) HNot(type, hbin, dex_pc);
+ hnot = new (GetGraph()->GetAllocator()) HNot(type, hbin, dex_pc);
}
op->GetBlock()->InsertInstructionBefore(hbin, op);
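The rewrite above is De Morgan's law run in reverse: two NOT inputs plus one binop become one binop plus one NOT. A spot check of the identities it relies on:

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t a : {0u, 1u, 0xF0F0F0F0u, 0xFFFFFFFFu}) {
        for (uint32_t b : {0u, 0x0Fu, 0x12345678u}) {
          assert((~a & ~b) == ~(a | b));  // HAnd of two HNots -> HNot of HOr
          assert((~a | ~b) == ~(a & b));  // HOr of two HNots -> HNot of HAnd
        }
      }
    }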
@@ -274,7 +274,7 @@
return false;
}
- ArenaAllocator* arena = mul->GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* allocator = mul->GetBlock()->GetGraph()->GetAllocator();
if (mul->HasOnlyOneNonEnvironmentUse()) {
HInstruction* use = mul->GetUses().front().GetUser();
@@ -307,14 +307,14 @@
use->IsVecAdd() ? HInstruction::kAdd : HInstruction::kSub;
if (accumulator != nullptr) {
HVecMultiplyAccumulate* mulacc =
- new (arena) HVecMultiplyAccumulate(arena,
- kind,
- accumulator,
- mul->GetLeft(),
- mul->GetRight(),
- binop->GetPackedType(),
- binop->GetVectorLength(),
- binop->GetDexPc());
+ new (allocator) HVecMultiplyAccumulate(allocator,
+ kind,
+ accumulator,
+ mul->GetLeft(),
+ mul->GetRight(),
+ binop->GetPackedType(),
+ binop->GetVectorLength(),
+ binop->GetDexPc());
binop->GetBlock()->ReplaceAndRemoveInstructionWith(binop, mulacc);
DCHECK(!mul->HasUses());
@@ -407,7 +407,8 @@
HUShr* ushr,
HShl* shl) {
DCHECK(op->IsAdd() || op->IsXor() || op->IsOr()) << op->DebugName();
- HRor* ror = new (GetGraph()->GetArena()) HRor(ushr->GetType(), ushr->GetLeft(), ushr->GetRight());
+ HRor* ror =
+ new (GetGraph()->GetAllocator()) HRor(ushr->GetType(), ushr->GetLeft(), ushr->GetRight());
op->GetBlock()->ReplaceAndRemoveInstructionWith(op, ror);
if (!ushr->HasUses()) {
ushr->GetBlock()->RemoveInstruction(ushr);
@@ -667,7 +668,7 @@
MaybeRecordStat(stats_, kRemovedInstanceOf);
if (outcome && can_be_null) {
// Type test will succeed, we just need a null test.
- HNotEqual* test = new (graph->GetArena()) HNotEqual(graph->GetNullConstant(), object);
+ HNotEqual* test = new (graph->GetAllocator()) HNotEqual(graph->GetNullConstant(), object);
instruction->GetBlock()->InsertInstructionBefore(test, instruction);
instruction->ReplaceWith(test);
} else {
@@ -699,30 +700,30 @@
}
}
-static HCondition* GetOppositeConditionSwapOps(ArenaAllocator* arena, HInstruction* cond) {
+static HCondition* GetOppositeConditionSwapOps(ArenaAllocator* allocator, HInstruction* cond) {
HInstruction *lhs = cond->InputAt(0);
HInstruction *rhs = cond->InputAt(1);
switch (cond->GetKind()) {
case HInstruction::kEqual:
- return new (arena) HEqual(rhs, lhs);
+ return new (allocator) HEqual(rhs, lhs);
case HInstruction::kNotEqual:
- return new (arena) HNotEqual(rhs, lhs);
+ return new (allocator) HNotEqual(rhs, lhs);
case HInstruction::kLessThan:
- return new (arena) HGreaterThan(rhs, lhs);
+ return new (allocator) HGreaterThan(rhs, lhs);
case HInstruction::kLessThanOrEqual:
- return new (arena) HGreaterThanOrEqual(rhs, lhs);
+ return new (allocator) HGreaterThanOrEqual(rhs, lhs);
case HInstruction::kGreaterThan:
- return new (arena) HLessThan(rhs, lhs);
+ return new (allocator) HLessThan(rhs, lhs);
case HInstruction::kGreaterThanOrEqual:
- return new (arena) HLessThanOrEqual(rhs, lhs);
+ return new (allocator) HLessThanOrEqual(rhs, lhs);
case HInstruction::kBelow:
- return new (arena) HAbove(rhs, lhs);
+ return new (allocator) HAbove(rhs, lhs);
case HInstruction::kBelowOrEqual:
- return new (arena) HAboveOrEqual(rhs, lhs);
+ return new (allocator) HAboveOrEqual(rhs, lhs);
case HInstruction::kAbove:
- return new (arena) HBelow(rhs, lhs);
+ return new (allocator) HBelow(rhs, lhs);
case HInstruction::kAboveOrEqual:
- return new (arena) HBelowOrEqual(rhs, lhs);
+ return new (allocator) HBelowOrEqual(rhs, lhs);
default:
LOG(FATAL) << "Unknown ConditionType " << cond->GetKind();
}
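The function keeps the predicate's truth value while swapping the operands: lhs < rhs becomes rhs > lhs, and Below/Above are the unsigned counterparts. A spot check for a few signed cases:

    #include <cassert>

    int main() {
      for (int lhs = -2; lhs <= 2; ++lhs) {
        for (int rhs = -2; rhs <= 2; ++rhs) {
          assert((lhs <  rhs) == (rhs >  lhs));
          assert((lhs <= rhs) == (rhs >= lhs));
          assert((lhs == rhs) == (rhs == lhs));
        }
      }
    }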
@@ -836,7 +837,9 @@
}
// Constructs a new ABS(x) node in the HIR.
-static HInstruction* NewIntegralAbs(ArenaAllocator* arena, HInstruction* x, HInstruction* cursor) {
+static HInstruction* NewIntegralAbs(ArenaAllocator* allocator,
+ HInstruction* x,
+ HInstruction* cursor) {
DataType::Type type = x->GetType();
DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64);
// Construct a fake intrinsic with as much context as is needed to allocate one.
@@ -847,8 +850,8 @@
HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
0u
};
- HInvokeStaticOrDirect* invoke = new (arena) HInvokeStaticOrDirect(
- arena,
+ HInvokeStaticOrDirect* invoke = new (allocator) HInvokeStaticOrDirect(
+ allocator,
1,
type,
x->GetDexPc(),
@@ -939,14 +942,14 @@
if ((cmp == kCondLT || cmp == kCondLE) &&
(a == negated && a == false_value && IsInt64Value(b, 0))) {
// Found a < 0 ? -a : a which can be replaced by ABS(a).
- replace_with = NewIntegralAbs(GetGraph()->GetArena(), false_value, select);
+ replace_with = NewIntegralAbs(GetGraph()->GetAllocator(), false_value, select);
}
} else if (false_value->IsNeg()) {
HInstruction* negated = false_value->InputAt(0);
if ((cmp == kCondGT || cmp == kCondGE) &&
(a == true_value && a == negated && IsInt64Value(b, 0))) {
// Found a > 0 ? a : -a which can be replaced by ABS(a).
- replace_with = NewIntegralAbs(GetGraph()->GetArena(), true_value, select);
+ replace_with = NewIntegralAbs(GetGraph()->GetAllocator(), true_value, select);
}
} else if (true_value->IsSub() && false_value->IsSub()) {
HInstruction* true_sub1 = true_value->InputAt(0);
@@ -961,7 +964,7 @@
// Found a > b ? a - b : b - a or
// a < b ? b - a : a - b
// which can be replaced by ABS(a - b) for lower precision operands a, b.
- replace_with = NewIntegralAbs(GetGraph()->GetArena(), true_value, select);
+ replace_with = NewIntegralAbs(GetGraph()->GetAllocator(), true_value, select);
}
}
}
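Written out directly, the select patterns recognized above all compute an absolute value (a small check, not ART code):

    #include <cassert>
    #include <cstdint>

    int64_t AbsViaSelect(int64_t a) { return a < 0 ? -a : a; }  // a < 0 ? -a : a

    int main() {
      for (int64_t a : {-5, -1, 0, 1, 7}) {
        assert(AbsViaSelect(a) == (a > 0 ? a : -a));  // a > 0 ? a : -a
      }
      int32_t a = 3, b = 9;
      assert((a > b ? a - b : b - a) == 6);           // ABS(a - b)
    }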
@@ -1173,7 +1176,8 @@
// particular, we do not want the live range of `b` to be extended if we are
// not sure the initial 'NEG' instruction can be removed.
HInstruction* other = left_is_neg ? right : left;
- HSub* sub = new(GetGraph()->GetArena()) HSub(instruction->GetType(), other, neg->GetInput());
+ HSub* sub =
+ new(GetGraph()->GetAllocator()) HSub(instruction->GetType(), other, neg->GetInput());
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, sub);
RecordSimplification();
neg->GetBlock()->RemoveInstruction(neg);
@@ -1251,10 +1255,10 @@
DCHECK_NE(new_and_input->GetType(), DataType::Type::kInt64);
HConstant* new_const = GetGraph()->GetConstant(DataType::Type::kInt32, value);
HAnd* new_and =
- new (GetGraph()->GetArena()) HAnd(DataType::Type::kInt32, new_and_input, new_const);
+ new (GetGraph()->GetAllocator()) HAnd(DataType::Type::kInt32, new_and_input, new_const);
instruction->GetBlock()->InsertInstructionBefore(new_and, instruction);
HTypeConversion* new_conversion =
- new (GetGraph()->GetArena()) HTypeConversion(DataType::Type::kInt64, new_and);
+ new (GetGraph()->GetAllocator()) HTypeConversion(DataType::Type::kInt64, new_and);
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, new_conversion);
input_other->GetBlock()->RemoveInstruction(input_other);
RecordSimplification();
@@ -1279,7 +1283,7 @@
input_other->HasOnlyOneNonEnvironmentUse()) {
DCHECK(input_other->IsShr()); // For UShr, we would have taken the branch above.
// Replace SHR+AND with USHR, for example "(x >> 24) & 0xff" -> "x >>> 24".
- HUShr* ushr = new (GetGraph()->GetArena()) HUShr(instruction->GetType(),
+ HUShr* ushr = new (GetGraph()->GetAllocator()) HUShr(instruction->GetType(),
input_other->InputAt(0),
input_other->InputAt(1),
input_other->GetDexPc());
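The masked arithmetic shift above is safe to replace because (in Java, where >> is defined to sign-extend) the bits the mask keeps never include the sign-extended ones. A check of the "(x >> 24) & 0xff" -> "x >>> 24" example (C++ sketch; it assumes the common arithmetic-shift behavior for negative int32_t):

    #include <cassert>
    #include <cstdint>

    int main() {
      for (int32_t x : {0, 1, -1, INT32_MIN, INT32_MAX, 0x7F123456}) {
        uint32_t via_shr_and = static_cast<uint32_t>(x >> 24) & 0xff;  // SHR + AND
        uint32_t via_ushr    = static_cast<uint32_t>(x) >> 24;         // USHR
        assert(via_shr_and == via_ushr);
      }
    }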
@@ -1410,7 +1414,8 @@
// on the right hand side.
if (condition->GetLeft()->IsConstant() && !condition->GetRight()->IsConstant()) {
HBasicBlock* block = condition->GetBlock();
- HCondition* replacement = GetOppositeConditionSwapOps(block->GetGraph()->GetArena(), condition);
+ HCondition* replacement =
+ GetOppositeConditionSwapOps(block->GetGraph()->GetAllocator(), condition);
// If it is a fp we must set the opposite bias.
if (replacement != nullptr) {
if (condition->IsLtBias()) {
@@ -1506,7 +1511,7 @@
// with
// NEG dst, src
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(
- instruction, new (GetGraph()->GetArena()) HNeg(type, input_other));
+ instruction, new (GetGraph()->GetAllocator()) HNeg(type, input_other));
RecordSimplification();
return;
}
@@ -1532,7 +1537,7 @@
if (reciprocal != nullptr) {
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(
- instruction, new (GetGraph()->GetArena()) HMul(type, input_other, reciprocal));
+ instruction, new (GetGraph()->GetAllocator()) HMul(type, input_other, reciprocal));
RecordSimplification();
return;
}
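The reciprocal substitution above is only valid when 1/c is exactly representable, i.e. when c is a power of two; otherwise multiplying by the rounded reciprocal can differ from dividing. For example:

    #include <cassert>

    int main() {
      double x = 13.75;
      assert(x / 8.0 == x * (1.0 / 8.0));  // 1/8 is exact: rewrite is safe
      // x / 3.0 == x * (1.0 / 3.0) can fail, since 1/3 rounds,
      // which is why the pass only substitutes for power-of-two constants.
    }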
@@ -1544,7 +1549,7 @@
HInstruction* input_other = instruction->GetLeastConstantLeft();
DataType::Type type = instruction->GetType();
HBasicBlock* block = instruction->GetBlock();
- ArenaAllocator* allocator = GetGraph()->GetArena();
+ ArenaAllocator* allocator = GetGraph()->GetAllocator();
if (input_cst == nullptr) {
return;
@@ -1683,8 +1688,8 @@
// removed.
// We do not perform optimization for fp because we could lose the sign of zero.
HSub* sub = input->AsSub();
- HSub* new_sub =
- new (GetGraph()->GetArena()) HSub(instruction->GetType(), sub->GetRight(), sub->GetLeft());
+ HSub* new_sub = new (GetGraph()->GetAllocator()) HSub(
+ instruction->GetType(), sub->GetRight(), sub->GetLeft());
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, new_sub);
if (!sub->HasUses()) {
sub->GetBlock()->RemoveInstruction(sub);
@@ -1786,7 +1791,7 @@
}
HBasicBlock* block = instruction->GetBlock();
- ArenaAllocator* allocator = GetGraph()->GetArena();
+ ArenaAllocator* allocator = GetGraph()->GetAllocator();
HInstruction* left = instruction->GetLeft();
HInstruction* right = instruction->GetRight();
@@ -1818,7 +1823,7 @@
// SUB dst, a, tmp
// with
// ADD dst, a, b
- HAdd* add = new(GetGraph()->GetArena()) HAdd(type, left, right->AsNeg()->GetInput());
+ HAdd* add = new(GetGraph()->GetAllocator()) HAdd(type, left, right->AsNeg()->GetInput());
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, add);
RecordSimplification();
right->GetBlock()->RemoveInstruction(right);
@@ -1834,9 +1839,9 @@
// NEG dst, tmp
// The second version is not intrinsically better, but enables more
// transformations.
- HAdd* add = new(GetGraph()->GetArena()) HAdd(type, left->AsNeg()->GetInput(), right);
+ HAdd* add = new(GetGraph()->GetAllocator()) HAdd(type, left->AsNeg()->GetInput(), right);
instruction->GetBlock()->InsertInstructionBefore(add, instruction);
- HNeg* neg = new (GetGraph()->GetArena()) HNeg(instruction->GetType(), add);
+ HNeg* neg = new (GetGraph()->GetAllocator()) HNeg(instruction->GetType(), add);
instruction->GetBlock()->InsertInstructionBefore(neg, instruction);
instruction->ReplaceWith(neg);
instruction->GetBlock()->RemoveInstruction(instruction);
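Both SUB rewrites in the last two hunks are plain integer identities under two's-complement wraparound: a - (-b) == a + b, and (-a) - b == -(a + b). A quick check:

    #include <cassert>

    int main() {
      for (int a = -3; a <= 3; ++a) {
        for (int b = -3; b <= 3; ++b) {
          assert(a - (-b) == a + b);       // SUB of NEG -> ADD
          assert((-a) - b == -(a + b));    // NEG then SUB -> ADD then NEG
        }
      }
    }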
@@ -1898,7 +1903,7 @@
// XOR dst, src, 1
// with
// BOOLEAN_NOT dst, src
- HBooleanNot* boolean_not = new (GetGraph()->GetArena()) HBooleanNot(input_other);
+ HBooleanNot* boolean_not = new (GetGraph()->GetAllocator()) HBooleanNot(input_other);
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, boolean_not);
RecordSimplification();
return;
@@ -1909,7 +1914,7 @@
// XOR dst, src, 0xFFF...FF
// with
// NOT dst, src
- HNot* bitwise_not = new (GetGraph()->GetArena()) HNot(instruction->GetType(), input_other);
+ HNot* bitwise_not = new (GetGraph()->GetAllocator()) HNot(instruction->GetType(), input_other);
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, bitwise_not);
RecordSimplification();
return;
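The two XOR rewrites above: for a boolean b, b ^ 1 is its logical negation, and for any integer x, x ^ ~0 flips every bit:

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t b : {0u, 1u}) {
        assert((b ^ 1u) == (b == 0u ? 1u : 0u));  // XOR 1 -> BOOLEAN_NOT
      }
      for (int32_t x : {0, 1, -7, 0x12345678}) {
        assert((x ^ ~0) == ~x);                   // XOR all-ones -> NOT
      }
    }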
@@ -1980,10 +1985,10 @@
// Unconditionally set the type of the negated distance to `int`,
// as shift and rotate operations expect a 32-bit (or narrower)
// value for their distance input.
- distance = new (GetGraph()->GetArena()) HNeg(DataType::Type::kInt32, distance);
+ distance = new (GetGraph()->GetAllocator()) HNeg(DataType::Type::kInt32, distance);
invoke->GetBlock()->InsertInstructionBefore(distance, invoke);
}
- HRor* ror = new (GetGraph()->GetArena()) HRor(type, value, distance);
+ HRor* ror = new (GetGraph()->GetAllocator()) HRor(type, value, distance);
invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, ror);
// Remove ClinitCheck and LoadClass, if possible.
HInstruction* clinit = invoke->GetInputs().back();
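Negating the distance works because rotation distances are taken modulo the bit width, so rotating left by d equals rotating right by -d. A sketch with hand-rolled 32-bit rotates (not ART code):

    #include <cassert>
    #include <cstdint>

    uint32_t RotL(uint32_t x, uint32_t d) { return (x << (d & 31)) | (x >> ((32 - d) & 31)); }
    uint32_t RotR(uint32_t x, uint32_t d) { return (x >> (d & 31)) | (x << ((32 - d) & 31)); }

    int main() {
      for (uint32_t x : {0x80000001u, 0xDEADBEEFu}) {
        for (uint32_t d : {0u, 1u, 13u, 31u, 32u}) {
          assert(RotL(x, d) == RotR(x, 0u - d));  // rotate-left == rotate-right by -d
        }
      }
    }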
@@ -2127,7 +2132,7 @@
} else {
right = GetGraph()->GetIntConstant(0);
}
- HCompare* compare = new (GetGraph()->GetArena())
+ HCompare* compare = new (GetGraph()->GetAllocator())
HCompare(type, left, right, ComparisonBias::kNoBias, dex_pc);
invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, compare);
}
@@ -2137,7 +2142,7 @@
uint32_t dex_pc = invoke->GetDexPc();
// IsNaN(x) is the same as x != x.
HInstruction* x = invoke->InputAt(0);
- HCondition* condition = new (GetGraph()->GetArena()) HNotEqual(x, x, dex_pc);
+ HCondition* condition = new (GetGraph()->GetAllocator()) HNotEqual(x, x, dex_pc);
condition->SetBias(ComparisonBias::kLtBias);
invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, condition);
}
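The lowering leans on NaN being the only value that compares unequal to itself; the lt-bias makes the generated comparison treat NaN the right way. For instance:

    #include <cassert>
    #include <cmath>
    #include <limits>

    int main() {
      double nan = std::numeric_limits<double>::quiet_NaN();
      assert(nan != nan);                        // only NaN is != itself
      assert(!(1.0 != 1.0));
      assert(std::isnan(nan) == (nan != nan));   // IsNaN(x) <=> x != x
    }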
@@ -2164,11 +2169,11 @@
kNoThrow);
}
// Test IsNaN(x), which is the same as x != x.
- HCondition* condition = new (GetGraph()->GetArena()) HNotEqual(x, x, dex_pc);
+ HCondition* condition = new (GetGraph()->GetAllocator()) HNotEqual(x, x, dex_pc);
condition->SetBias(ComparisonBias::kLtBias);
invoke->GetBlock()->InsertInstructionBefore(condition, invoke->GetNext());
// Select between the two.
- HInstruction* select = new (GetGraph()->GetArena()) HSelect(condition, nan, invoke, dex_pc);
+ HInstruction* select = new (GetGraph()->GetAllocator()) HSelect(condition, nan, invoke, dex_pc);
invoke->GetBlock()->InsertInstructionBefore(select, condition->GetNext());
invoke->ReplaceWithExceptInReplacementAtIndex(select, 0); // false at index 0
}
@@ -2177,20 +2182,20 @@
HInstruction* str = invoke->InputAt(0);
HInstruction* index = invoke->InputAt(1);
uint32_t dex_pc = invoke->GetDexPc();
- ArenaAllocator* arena = GetGraph()->GetArena();
+ ArenaAllocator* allocator = GetGraph()->GetAllocator();
// We treat String as an array to allow DCE and BCE to seamlessly work on strings,
// so create the HArrayLength, HBoundsCheck and HArrayGet.
- HArrayLength* length = new (arena) HArrayLength(str, dex_pc, /* is_string_length */ true);
+ HArrayLength* length = new (allocator) HArrayLength(str, dex_pc, /* is_string_length */ true);
invoke->GetBlock()->InsertInstructionBefore(length, invoke);
- HBoundsCheck* bounds_check = new (arena) HBoundsCheck(
+ HBoundsCheck* bounds_check = new (allocator) HBoundsCheck(
index, length, dex_pc, invoke->GetDexMethodIndex());
invoke->GetBlock()->InsertInstructionBefore(bounds_check, invoke);
- HArrayGet* array_get = new (arena) HArrayGet(str,
- bounds_check,
- DataType::Type::kUint16,
- SideEffects::None(), // Strings are immutable.
- dex_pc,
- /* is_string_char_at */ true);
+ HArrayGet* array_get = new (allocator) HArrayGet(str,
+ bounds_check,
+ DataType::Type::kUint16,
+ SideEffects::None(), // Strings are immutable.
+ dex_pc,
+ /* is_string_char_at */ true);
invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, array_get);
bounds_check->CopyEnvironmentFrom(invoke->GetEnvironment());
GetGraph()->SetHasBoundsChecks(true);
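The shape of the charAt lowering above, spelled out as ordinary code (illustrative only; CharAtLowered is a made-up name): length, bounds check, then element load, which is exactly what lets BCE and DCE treat strings like arrays:

    #include <cstddef>
    #include <stdexcept>
    #include <string>

    char CharAtLowered(const std::string& str, std::size_t index) {
      std::size_t length = str.size();                        // HArrayLength (is_string_length)
      if (index >= length) throw std::out_of_range("index");  // HBoundsCheck
      return str[index];                                      // HArrayGet (is_string_char_at)
    }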
@@ -2202,13 +2207,13 @@
// We treat String as an array to allow DCE and BCE to seamlessly work on strings,
// so create the HArrayLength.
HArrayLength* length =
- new (GetGraph()->GetArena()) HArrayLength(str, dex_pc, /* is_string_length */ true);
+ new (GetGraph()->GetAllocator()) HArrayLength(str, dex_pc, /* is_string_length */ true);
HInstruction* replacement;
if (invoke->GetIntrinsic() == Intrinsics::kStringIsEmpty) {
// For String.isEmpty(), create the `HEqual` representing the `length == 0`.
invoke->GetBlock()->InsertInstructionBefore(length, invoke);
HIntConstant* zero = GetGraph()->GetIntConstant(0);
- HEqual* equal = new (GetGraph()->GetArena()) HEqual(length, zero, dex_pc);
+ HEqual* equal = new (GetGraph()->GetAllocator()) HEqual(length, zero, dex_pc);
replacement = equal;
} else {
DCHECK_EQ(invoke->GetIntrinsic(), Intrinsics::kStringLength);
@@ -2278,9 +2283,11 @@
}
}
-void InstructionSimplifierVisitor::SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind) {
+void InstructionSimplifierVisitor::SimplifyMemBarrier(HInvoke* invoke,
+ MemBarrierKind barrier_kind) {
uint32_t dex_pc = invoke->GetDexPc();
- HMemoryBarrier* mem_barrier = new (GetGraph()->GetArena()) HMemoryBarrier(barrier_kind, dex_pc);
+ HMemoryBarrier* mem_barrier =
+ new (GetGraph()->GetAllocator()) HMemoryBarrier(barrier_kind, dex_pc);
invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, mem_barrier);
}
@@ -2519,13 +2526,13 @@
int64_t const3_val = ComputeAddition(type, const1_val, const2_val);
HBasicBlock* block = instruction->GetBlock();
HConstant* const3 = block->GetGraph()->GetConstant(type, const3_val);
- ArenaAllocator* arena = instruction->GetArena();
+ ArenaAllocator* allocator = instruction->GetAllocator();
HInstruction* z;
if (is_x_negated) {
- z = new (arena) HSub(type, const3, x, instruction->GetDexPc());
+ z = new (allocator) HSub(type, const3, x, instruction->GetDexPc());
} else {
- z = new (arena) HAdd(type, x, const3, instruction->GetDexPc());
+ z = new (allocator) HAdd(type, x, const3, instruction->GetDexPc());
}
block->ReplaceAndRemoveInstructionWith(instruction, z);
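The reassociation above folds two constants into one, with a subtraction form when x arrives negated:

    #include <cassert>

    int main() {
      const int c1 = 5, c2 = 7, c3 = c1 + c2;  // const3 = ComputeAddition(c1, c2)
      for (int x = -4; x <= 4; ++x) {
        assert((x + c1) + c2 == x + c3);       // plain case: HAdd(x, const3)
        assert((c1 - x) + c2 == c3 - x);       // is_x_negated: HSub(const3, x)
      }
    }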
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index 7439893..9422f9f 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -137,12 +137,12 @@
if (do_merge) {
HDataProcWithShifterOp* alu_with_op =
- new (GetGraph()->GetArena()) HDataProcWithShifterOp(use,
- other_input,
- bitfield_op->InputAt(0),
- op_kind,
- shift_amount,
- use->GetDexPc());
+ new (GetGraph()->GetAllocator()) HDataProcWithShifterOp(use,
+ other_input,
+ bitfield_op->InputAt(0),
+ op_kind,
+ shift_amount,
+ use->GetDexPc());
use->GetBlock()->ReplaceAndRemoveInstructionWith(use, alu_with_op);
if (bitfield_op->GetUses().empty()) {
bitfield_op->GetBlock()->RemoveInstruction(bitfield_op);
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index c639953..c0ab68f 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -141,12 +141,12 @@
if (do_merge) {
HDataProcWithShifterOp* alu_with_op =
- new (GetGraph()->GetArena()) HDataProcWithShifterOp(use,
- other_input,
- bitfield_op->InputAt(0),
- op_kind,
- shift_amount,
- use->GetDexPc());
+ new (GetGraph()->GetAllocator()) HDataProcWithShifterOp(use,
+ other_input,
+ bitfield_op->InputAt(0),
+ op_kind,
+ shift_amount,
+ use->GetDexPc());
use->GetBlock()->ReplaceAndRemoveInstructionWith(use, alu_with_op);
if (bitfield_op->GetUses().empty()) {
bitfield_op->GetBlock()->RemoveInstruction(bitfield_op);
diff --git a/compiler/optimizing/instruction_simplifier_mips.cc b/compiler/optimizing/instruction_simplifier_mips.cc
index 4bf1bfb..6a0d8a6 100644
--- a/compiler/optimizing/instruction_simplifier_mips.cc
+++ b/compiler/optimizing/instruction_simplifier_mips.cc
@@ -74,7 +74,7 @@
}
HGraph* graph = access->GetBlock()->GetGraph();
- ArenaAllocator* arena = graph->GetArena();
+ ArenaAllocator* allocator = graph->GetAllocator();
size_t component_shift = DataType::SizeShift(packed_type);
bool is_extracting_beneficial = false;
@@ -113,7 +113,7 @@
HIntConstant* shift = graph->GetIntConstant(component_shift);
HIntermediateArrayAddressIndex* address =
- new (arena) HIntermediateArrayAddressIndex(index, shift, kNoDexPc);
+ new (allocator) HIntermediateArrayAddressIndex(index, shift, kNoDexPc);
access->GetBlock()->InsertInstructionBefore(address, access);
access->ReplaceInput(address, 1);
return true;
diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc
index 73d866f..1c13084 100644
--- a/compiler/optimizing/instruction_simplifier_shared.cc
+++ b/compiler/optimizing/instruction_simplifier_shared.cc
@@ -75,8 +75,8 @@
return false;
}
- ArenaAllocator* arena = mul->GetBlock()->GetGraph()->GetArena();
- HMultiplyAccumulate* mulacc = new(arena) HMultiplyAccumulate(
+ ArenaAllocator* allocator = mul->GetBlock()->GetGraph()->GetAllocator();
+ HMultiplyAccumulate* mulacc = new (allocator) HMultiplyAccumulate(
mul->GetType(), op_kind, input_a, input_a, input_b, mul->GetDexPc());
mul->GetBlock()->ReplaceAndRemoveInstructionWith(mul, mulacc);
@@ -105,7 +105,7 @@
return false;
}
- ArenaAllocator* arena = mul->GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* allocator = mul->GetBlock()->GetGraph()->GetAllocator();
if (mul->HasOnlyOneNonEnvironmentUse()) {
HInstruction* use = mul->GetUses().front().GetUser();
@@ -137,11 +137,11 @@
if (accumulator != nullptr) {
HMultiplyAccumulate* mulacc =
- new (arena) HMultiplyAccumulate(type,
- binop->GetKind(),
- accumulator,
- mul->GetLeft(),
- mul->GetRight());
+ new (allocator) HMultiplyAccumulate(type,
+ binop->GetKind(),
+ accumulator,
+ mul->GetLeft(),
+ mul->GetRight());
binop->GetBlock()->ReplaceAndRemoveInstructionWith(binop, mulacc);
DCHECK(!mul->HasUses());
@@ -150,11 +150,11 @@
}
} else if (use->IsNeg() && isa != kArm) {
HMultiplyAccumulate* mulacc =
- new (arena) HMultiplyAccumulate(type,
- HInstruction::kSub,
- mul->GetBlock()->GetGraph()->GetConstant(type, 0),
- mul->GetLeft(),
- mul->GetRight());
+ new (allocator) HMultiplyAccumulate(type,
+ HInstruction::kSub,
+ mul->GetBlock()->GetGraph()->GetConstant(type, 0),
+ mul->GetLeft(),
+ mul->GetRight());
use->GetBlock()->ReplaceAndRemoveInstructionWith(use, mulacc);
DCHECK(!mul->HasUses());
@@ -216,7 +216,7 @@
// BIC dst, src, mask (respectively ORN, EON)
HInstruction* src = hnot->AsNot()->GetInput();
- HBitwiseNegatedRight* neg_op = new (hnot->GetBlock()->GetGraph()->GetArena())
+ HBitwiseNegatedRight* neg_op = new (hnot->GetBlock()->GetGraph()->GetAllocator())
HBitwiseNegatedRight(op->GetType(), op->GetKind(), hother, src, op->GetDexPc());
op->GetBlock()->ReplaceAndRemoveInstructionWith(op, neg_op);
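The merged node maps a binop whose right operand is negated onto a single machine instruction: BIC for AND, ORN for OR, EON for XOR. The AND case clears, in the left operand, the bits set in the right one:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t a = 0xFFu, b = 0x0Fu;
      assert((a & ~b) == 0xF0u);         // BIC a, b: b's bits cleared from a
      assert((a | ~b) == 0xFFFFFFFFu);   // ORN a, b
      assert((a ^ ~b) == 0xFFFFFF0Fu);   // EON a, b
    }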
@@ -255,10 +255,10 @@
// Proceed to extract the base address computation.
HGraph* graph = access->GetBlock()->GetGraph();
- ArenaAllocator* arena = graph->GetArena();
+ ArenaAllocator* allocator = graph->GetAllocator();
HIntConstant* offset = graph->GetIntConstant(data_offset);
- HIntermediateAddress* address = new (arena) HIntermediateAddress(array, offset, kNoDexPc);
+ HIntermediateAddress* address = new (allocator) HIntermediateAddress(array, offset, kNoDexPc);
// TODO: Is it ok to not have this on the intermediate address?
// address->SetReferenceTypeInfo(array->GetReferenceTypeInfo());
access->GetBlock()->InsertInstructionBefore(address, access);
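The intent of HIntermediateAddress, sketched with an assumed flat layout (not the real mirror::Array representation): the data starts at a fixed offset from the object, so base + offset can be computed once and shared by several element accesses:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    int32_t LoadElement(const uint8_t* array_object, std::size_t data_offset, std::size_t index) {
      const uint8_t* data = array_object + data_offset;  // HIntermediateAddress: computed once
      int32_t value;
      std::memcpy(&value, data + index * sizeof(int32_t), sizeof(value));  // element access
      return value;
    }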
@@ -289,7 +289,7 @@
}
HGraph* graph = access->GetBlock()->GetGraph();
- ArenaAllocator* arena = graph->GetArena();
+ ArenaAllocator* allocator = graph->GetAllocator();
DataType::Type packed_type = access->GetPackedType();
uint32_t data_offset = mirror::Array::DataOffset(
DataType::Size(packed_type)).Uint32Value();
@@ -328,7 +328,7 @@
HIntConstant* offset = graph->GetIntConstant(data_offset);
HIntConstant* shift = graph->GetIntConstant(component_shift);
HIntermediateAddressIndex* address =
- new (arena) HIntermediateAddressIndex(index, offset, shift, kNoDexPc);
+ new (allocator) HIntermediateAddressIndex(index, offset, shift, kNoDexPc);
access->GetBlock()->InsertInstructionBefore(address, access);
access->ReplaceInput(address, 1);
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 0f14d27..dfae534 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -220,7 +220,7 @@
}
// The intrinsic will call if it needs to allocate a j.l.Integer.
- LocationSummary* locations = new (invoke->GetBlock()->GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (invoke->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
if (!invoke->InputAt(0)->IsConstant()) {
locations->SetInAt(0, Location::RequiresRegister());
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 7abfd5b..4429e6e 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -100,7 +100,7 @@
// We're moving potentially two or more locations to locations that could overlap, so we need
// a parallel move resolver.
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
HInstruction* input = invoke->InputAt(i);
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 75a1ce7..ee07c4f 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -70,7 +70,7 @@
}
ArenaAllocator* IntrinsicCodeGeneratorARM64::GetAllocator() {
- return codegen_->GetGraph()->GetArena();
+ return codegen_->GetGraph()->GetAllocator();
}
#define __ codegen->GetVIXLAssembler()->
@@ -236,18 +236,16 @@
#define __ masm->
-static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
}
-static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
}
@@ -267,10 +265,10 @@
}
void IntrinsicLocationsBuilderARM64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
@@ -281,10 +279,10 @@
}
void IntrinsicLocationsBuilderARM64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -294,10 +292,9 @@
MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler());
}
-static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -324,7 +321,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitIntegerReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitIntegerReverseBytes(HInvoke* invoke) {
@@ -332,7 +329,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitLongReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitLongReverseBytes(HInvoke* invoke) {
@@ -340,17 +337,16 @@
}
void IntrinsicLocationsBuilderARM64::VisitShortReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitShortReverseBytes(HInvoke* invoke) {
GenReverseBytes(invoke->GetLocations(), DataType::Type::kInt16, GetVIXLAssembler());
}
-static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -368,7 +364,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
@@ -376,7 +372,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
@@ -396,7 +392,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
@@ -404,7 +400,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -423,7 +419,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitIntegerReverse(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitIntegerReverse(HInvoke* invoke) {
@@ -431,7 +427,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitLongReverse(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitLongReverse(HInvoke* invoke) {
@@ -456,7 +452,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitLongBitCount(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitLongBitCount(HInvoke* invoke) {
@@ -464,7 +460,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitIntegerBitCount(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitIntegerBitCount(HInvoke* invoke) {
@@ -489,7 +485,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitIntegerHighestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitIntegerHighestOneBit(HInvoke* invoke) {
@@ -497,7 +493,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitLongHighestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitLongHighestOneBit(HInvoke* invoke) {
@@ -518,7 +514,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitIntegerLowestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitIntegerLowestOneBit(HInvoke* invoke) {
@@ -526,17 +522,16 @@
}
void IntrinsicLocationsBuilderARM64::VisitLongLowestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitLongLowestOneBit(HInvoke* invoke) {
GenLowestOneBit(invoke, DataType::Type::kInt64, GetVIXLAssembler());
}
-static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
}
@@ -552,7 +547,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathAbsDouble(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathAbsDouble(HInvoke* invoke) {
@@ -560,7 +555,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathAbsFloat(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathAbsFloat(HInvoke* invoke) {
@@ -581,7 +576,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathAbsInt(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathAbsInt(HInvoke* invoke) {
@@ -589,7 +584,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathAbsLong(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathAbsLong(HInvoke* invoke) {
@@ -614,17 +609,16 @@
}
}
-static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
}
void IntrinsicLocationsBuilderARM64::VisitMathMinDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathMinDoubleDouble(HInvoke* invoke) {
@@ -632,7 +626,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathMinFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathMinFloatFloat(HInvoke* invoke) {
@@ -640,7 +634,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
@@ -648,7 +642,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathMaxFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathMaxFloatFloat(HInvoke* invoke) {
@@ -673,7 +667,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathMinIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathMinIntInt(HInvoke* invoke) {
@@ -681,7 +675,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathMinLongLong(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathMinLongLong(HInvoke* invoke) {
@@ -689,7 +683,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathMaxIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathMaxIntInt(HInvoke* invoke) {
@@ -697,7 +691,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathMaxLongLong(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathMaxLongLong(HInvoke* invoke) {
@@ -705,7 +699,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathSqrt(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathSqrt(HInvoke* invoke) {
@@ -715,7 +709,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathCeil(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathCeil(HInvoke* invoke) {
@@ -725,7 +719,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathFloor(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathFloor(HInvoke* invoke) {
@@ -735,7 +729,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathRint(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathRint(HInvoke* invoke) {
@@ -744,10 +738,9 @@
__ Frintn(DRegisterFrom(locations->Out()), DRegisterFrom(locations->InAt(0)));
}
-static void CreateFPToIntPlusFPTempLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToIntPlusFPTempLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
@@ -791,7 +784,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathRoundDouble(HInvoke* invoke) {
- CreateFPToIntPlusFPTempLocations(arena_, invoke);
+ CreateFPToIntPlusFPTempLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathRoundDouble(HInvoke* invoke) {
@@ -799,7 +792,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathRoundFloat(HInvoke* invoke) {
- CreateFPToIntPlusFPTempLocations(arena_, invoke);
+ CreateFPToIntPlusFPTempLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathRoundFloat(HInvoke* invoke) {
@@ -807,7 +800,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPeekByte(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPeekByte(HInvoke* invoke) {
@@ -817,7 +810,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPeekIntNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPeekIntNative(HInvoke* invoke) {
@@ -827,7 +820,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPeekLongNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPeekLongNative(HInvoke* invoke) {
@@ -837,7 +830,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPeekShortNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPeekShortNative(HInvoke* invoke) {
@@ -846,16 +839,15 @@
AbsoluteHeapOperandFrom(invoke->GetLocations()->InAt(0), 0));
}
-static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPokeByte(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPokeByte(HInvoke* invoke) {
@@ -865,7 +857,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPokeIntNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPokeIntNative(HInvoke* invoke) {
@@ -875,7 +867,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPokeLongNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPokeLongNative(HInvoke* invoke) {
@@ -885,7 +877,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPokeShortNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPokeShortNative(HInvoke* invoke) {
@@ -895,9 +887,8 @@
}
void IntrinsicLocationsBuilderARM64::VisitThreadCurrentThread(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
@@ -949,15 +940,16 @@
}
}
-static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
bool can_call = kEmitCompilerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
// We need a temporary register for the read barrier marking slow
@@ -972,22 +964,22 @@
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeGet(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeGetLong(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeGetObject(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafeGet(HInvoke* invoke) {
@@ -1009,10 +1001,9 @@
GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
}
-static void CreateIntIntIntIntToVoid(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -1020,31 +1011,31 @@
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePut(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePutOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePutVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePutObject(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePutLong(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
static void GenUnsafePut(HInvoke* invoke,
@@ -1151,17 +1142,18 @@
codegen_);
}
-static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena,
+static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator,
HInvoke* invoke,
DataType::Type type) {
bool can_call = kEmitCompilerReadBarrier &&
kUseBakerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -1265,10 +1257,10 @@
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeCASInt(HInvoke* invoke) {
- CreateIntIntIntIntIntToInt(arena_, invoke, DataType::Type::kInt32);
+ CreateIntIntIntIntIntToInt(allocator_, invoke, DataType::Type::kInt32);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeCASLong(HInvoke* invoke) {
- CreateIntIntIntIntIntToInt(arena_, invoke, DataType::Type::kInt64);
+ CreateIntIntIntIntIntToInt(allocator_, invoke, DataType::Type::kInt64);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeCASObject(HInvoke* invoke) {
// The only read barrier implementation supporting the
@@ -1277,7 +1269,7 @@
return;
}
- CreateIntIntIntIntIntToInt(arena_, invoke, DataType::Type::kReference);
+ CreateIntIntIntIntIntToInt(allocator_, invoke, DataType::Type::kReference);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafeCASInt(HInvoke* invoke) {
@@ -1295,11 +1287,12 @@
}
void IntrinsicLocationsBuilderARM64::VisitStringCompareTo(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- invoke->InputAt(1)->CanBeNull()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke,
+ invoke->InputAt(1)->CanBeNull()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
@@ -1526,9 +1519,8 @@
}
void IntrinsicLocationsBuilderARM64::VisitStringEquals(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -1754,9 +1746,8 @@
}
void IntrinsicLocationsBuilderARM64::VisitStringIndexOf(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
// best to align the inputs accordingly.
InvokeRuntimeCallingConvention calling_convention;
@@ -1774,9 +1765,8 @@
}
void IntrinsicLocationsBuilderARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
// best to align the inputs accordingly.
InvokeRuntimeCallingConvention calling_convention;
@@ -1792,9 +1782,8 @@
}
void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromBytes(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
@@ -1819,9 +1808,8 @@
}
void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromChars(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
@@ -1841,9 +1829,8 @@
}
void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromString(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference));
@@ -1864,29 +1851,27 @@
__ Bind(slow_path->GetExitLabel());
}
-static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
DCHECK_EQ(invoke->GetNumberOfArguments(), 1U);
DCHECK(DataType::IsFloatingPointType(invoke->InputAt(0)->GetType()));
DCHECK(DataType::IsFloatingPointType(invoke->GetType()));
- LocationSummary* const locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* const locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(calling_convention.GetReturnLocation(invoke->GetType()));
}
-static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
DCHECK_EQ(invoke->GetNumberOfArguments(), 2U);
DCHECK(DataType::IsFloatingPointType(invoke->InputAt(0)->GetType()));
DCHECK(DataType::IsFloatingPointType(invoke->InputAt(1)->GetType()));
DCHECK(DataType::IsFloatingPointType(invoke->GetType()));
- LocationSummary* const locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* const locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0)));
@@ -1901,7 +1886,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathCos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathCos(HInvoke* invoke) {
@@ -1909,7 +1894,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathSin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathSin(HInvoke* invoke) {
@@ -1917,7 +1902,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathAcos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathAcos(HInvoke* invoke) {
@@ -1925,7 +1910,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathAsin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathAsin(HInvoke* invoke) {
@@ -1933,7 +1918,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathAtan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathAtan(HInvoke* invoke) {
@@ -1941,7 +1926,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathCbrt(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathCbrt(HInvoke* invoke) {
@@ -1949,7 +1934,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathCosh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathCosh(HInvoke* invoke) {
@@ -1957,7 +1942,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathExp(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathExp(HInvoke* invoke) {
@@ -1965,7 +1950,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathExpm1(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathExpm1(HInvoke* invoke) {
@@ -1973,7 +1958,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathLog(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathLog(HInvoke* invoke) {
@@ -1981,7 +1966,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathLog10(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathLog10(HInvoke* invoke) {
@@ -1989,7 +1974,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathSinh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathSinh(HInvoke* invoke) {
@@ -1997,7 +1982,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathTan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathTan(HInvoke* invoke) {
@@ -2005,7 +1990,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathTanh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathTanh(HInvoke* invoke) {
@@ -2013,7 +1998,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathAtan2(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathAtan2(HInvoke* invoke) {
@@ -2021,7 +2006,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathHypot(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathHypot(HInvoke* invoke) {
@@ -2029,7 +2014,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathNextAfter(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathNextAfter(HInvoke* invoke) {
@@ -2037,9 +2022,8 @@
}
void IntrinsicLocationsBuilderARM64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -2189,10 +2173,9 @@
}
}
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
- LocationSummary* locations = new (allocator) LocationSummary(invoke,
- LocationSummary::kCallOnSlowPath,
- kIntrinsified);
+ ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
// arraycopy(char[] src, int src_pos, char[] dst, int dst_pos, int length).
locations->SetInAt(0, Location::RequiresRegister());
SetSystemArrayCopyLocationRequires(locations, 1, invoke->InputAt(1));
@@ -2428,10 +2411,9 @@
return;
}
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
- LocationSummary* locations = new (allocator) LocationSummary(invoke,
- LocationSummary::kCallOnSlowPath,
- kIntrinsified);
+ ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
// arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length).
locations->SetInAt(0, Location::RequiresRegister());
SetSystemArrayCopyLocationRequires(locations, 1, invoke->InputAt(1));
@@ -2937,7 +2919,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitFloatIsInfinite(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitFloatIsInfinite(HInvoke* invoke) {
@@ -2945,7 +2927,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitDoubleIsInfinite(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitDoubleIsInfinite(HInvoke* invoke) {
@@ -3026,9 +3008,8 @@
}
void IntrinsicLocationsBuilderARM64::VisitThreadInterrupted(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
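The hunks above are a mechanical rename (arena/arena_ to allocator/allocator_) plus a reflow of the placement-new calls; behavior is unchanged, since every LocationSummary is still placement-new'd into the graph's arena. A minimal, self-contained sketch of that allocation pattern follows; Arena and Summary here are simplified stand-ins, not the ART classes.

// Sketch of the `new (allocator) LocationSummary(...)` pattern, under the
// assumption of a simple bump-pointer arena; types are illustrative only.
#include <cstddef>
#include <cstdint>

class Arena {
 public:
  // Bump-pointer allocation; everything is released at once when the arena dies.
  void* Alloc(size_t bytes) {
    void* p = buffer_ + used_;
    used_ += (bytes + 7) & ~static_cast<size_t>(7);  // keep 8-byte alignment
    return p;
  }

 private:
  alignas(8) uint8_t buffer_[4096];
  size_t used_ = 0;
};

struct Summary {
  explicit Summary(int call_kind) : call_kind(call_kind) {}
  // Class-scope placement new: storage comes from the arena, not the heap.
  static void* operator new(size_t size, Arena* arena) { return arena->Alloc(size); }
  int call_kind;
};

int main() {
  Arena arena;
  Summary* s = new (&arena) Summary(/* call_kind= */ 0);  // never deleted individually
  return s->call_kind;
}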
diff --git a/compiler/optimizing/intrinsics_arm64.h b/compiler/optimizing/intrinsics_arm64.h
index 5a6d180..3533c88 100644
--- a/compiler/optimizing/intrinsics_arm64.h
+++ b/compiler/optimizing/intrinsics_arm64.h
@@ -39,8 +39,8 @@
class IntrinsicLocationsBuilderARM64 FINAL : public IntrinsicVisitor {
public:
- explicit IntrinsicLocationsBuilderARM64(ArenaAllocator* arena, CodeGeneratorARM64* codegen)
- : arena_(arena), codegen_(codegen) {}
+ explicit IntrinsicLocationsBuilderARM64(ArenaAllocator* allocator, CodeGeneratorARM64* codegen)
+ : allocator_(allocator), codegen_(codegen) {}
// Define visitor methods.
@@ -57,7 +57,7 @@
bool TryDispatch(HInvoke* invoke);
private:
- ArenaAllocator* arena_;
+ ArenaAllocator* allocator_;
CodeGeneratorARM64* codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARM64);
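The header change mirrors the .cc rename: the builder's arena_ member becomes allocator_, holding the same pointer the constructor fetches from the graph (as the VIXL constructor below shows, via the accessor renamed from GetArena() to GetAllocator()). A hedged sketch of that shape, using simplified stand-in types rather than the ART ones:

// Graph, Codegen and ArenaAlloc are stand-ins; only the naming/threading of
// the allocator pointer is the point here.
struct ArenaAlloc {};

struct Graph {
  ArenaAlloc arena;
  ArenaAlloc* GetAllocator() { return &arena; }  // accessor renamed from GetArena()
};

struct Codegen {
  Graph* graph;
  Graph* GetGraph() const { return graph; }
};

class IntrinsicLocationsBuilder {
 public:
  explicit IntrinsicLocationsBuilder(Codegen* codegen)
      : allocator_(codegen->GetGraph()->GetAllocator()),  // previously GetArena()
        codegen_(codegen) {}

  ArenaAlloc* GetAllocator() const { return allocator_; }
  Codegen* GetCodegen() const { return codegen_; }

 private:
  ArenaAlloc* allocator_;  // previously arena_; same pointer, clearer name
  Codegen* codegen_;
};

int main() {
  Graph graph;
  Codegen codegen{&graph};
  IntrinsicLocationsBuilder builder(&codegen);
  return builder.GetAllocator() == graph.GetAllocator() ? 0 : 1;
}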
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 7ce576c..332306b 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -65,7 +65,7 @@
}
ArenaAllocator* IntrinsicCodeGeneratorARMVIXL::GetAllocator() {
- return codegen_->GetGraph()->GetArena();
+ return codegen_->GetGraph()->GetAllocator();
}
// Default slow-path for fallback (calling the managed code to handle the intrinsic) in an
@@ -246,7 +246,7 @@
};
IntrinsicLocationsBuilderARMVIXL::IntrinsicLocationsBuilderARMVIXL(CodeGeneratorARMVIXL* codegen)
- : arena_(codegen->GetGraph()->GetArena()),
+ : allocator_(codegen->GetGraph()->GetAllocator()),
codegen_(codegen),
assembler_(codegen->GetAssembler()),
features_(codegen->GetInstructionSetFeatures()) {}
@@ -260,18 +260,16 @@
return res->Intrinsified();
}
-static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
}
-static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
}
@@ -297,10 +295,10 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
@@ -311,10 +309,10 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -324,26 +322,23 @@
MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
-static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
-static void CreateLongToLongLocationsWithOverlap(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateLongToLongLocationsWithOverlap(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}
-static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
}
@@ -376,7 +371,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
@@ -384,7 +379,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- CreateLongToLongLocationsWithOverlap(arena_, invoke);
+ CreateLongToLongLocationsWithOverlap(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
@@ -422,7 +417,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
@@ -430,7 +425,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- CreateLongToLongLocationsWithOverlap(arena_, invoke);
+ CreateLongToLongLocationsWithOverlap(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -442,7 +437,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathAbsDouble(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsDouble(HInvoke* invoke) {
@@ -450,17 +445,16 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathAbsFloat(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsFloat(HInvoke* invoke) {
MathAbsFP(invoke, GetAssembler());
}
-static void CreateIntToIntPlusTemp(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToIntPlusTemp(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -499,7 +493,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathAbsInt(HInvoke* invoke) {
- CreateIntToIntPlusTemp(arena_, invoke);
+ CreateIntToIntPlusTemp(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsInt(HInvoke* invoke) {
@@ -508,7 +502,7 @@
void IntrinsicLocationsBuilderARMVIXL::VisitMathAbsLong(HInvoke* invoke) {
- CreateIntToIntPlusTemp(arena_, invoke);
+ CreateIntToIntPlusTemp(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsLong(HInvoke* invoke) {
@@ -575,17 +569,16 @@
}
}
-static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
locations->SetOut(Location::SameAsFirstInput());
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMinFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
invoke->GetLocations()->AddTemp(Location::RequiresRegister());
}
@@ -594,7 +587,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
invoke->GetLocations()->AddTemp(Location::RequiresRegister());
}
@@ -654,7 +647,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMinDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathMinDoubleDouble(HInvoke* invoke) {
@@ -662,7 +655,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxDoubleDouble(HInvoke* invoke) {
@@ -708,17 +701,16 @@
}
}
-static void CreateLongLongToLongLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateLongLongToLongLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMinLongLong(HInvoke* invoke) {
- CreateLongLongToLongLocations(arena_, invoke);
+ CreateLongLongToLongLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathMinLongLong(HInvoke* invoke) {
@@ -726,7 +718,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxLongLong(HInvoke* invoke) {
- CreateLongLongToLongLocations(arena_, invoke);
+ CreateLongLongToLongLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxLongLong(HInvoke* invoke) {
@@ -751,17 +743,16 @@
}
}
-static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMinIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathMinIntInt(HInvoke* invoke) {
@@ -769,7 +760,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxIntInt(HInvoke* invoke) {
@@ -777,7 +768,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathSqrt(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathSqrt(HInvoke* invoke) {
@@ -787,7 +778,7 @@
void IntrinsicLocationsBuilderARMVIXL::VisitMathRint(HInvoke* invoke) {
if (features_.HasARMv8AInstructions()) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
}
@@ -799,9 +790,8 @@
void IntrinsicLocationsBuilderARMVIXL::VisitMathRoundFloat(HInvoke* invoke) {
if (features_.HasARMv8AInstructions()) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
@@ -850,7 +840,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekByte(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekByte(HInvoke* invoke) {
@@ -860,7 +850,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekIntNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekIntNative(HInvoke* invoke) {
@@ -870,7 +860,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekLongNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekLongNative(HInvoke* invoke) {
@@ -891,7 +881,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekShortNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekShortNative(HInvoke* invoke) {
@@ -900,16 +890,15 @@
__ Ldrsh(OutputRegister(invoke), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
}
-static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeByte(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeByte(HInvoke* invoke) {
@@ -918,7 +907,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeIntNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeIntNative(HInvoke* invoke) {
@@ -927,7 +916,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeLongNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeLongNative(HInvoke* invoke) {
@@ -941,7 +930,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeShortNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeShortNative(HInvoke* invoke) {
@@ -950,9 +939,8 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitThreadCurrentThread(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
@@ -1034,17 +1022,18 @@
}
}
-static void CreateIntIntIntToIntLocations(ArenaAllocator* arena,
+static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator,
HInvoke* invoke,
DataType::Type type) {
bool can_call = kEmitCompilerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -1061,22 +1050,22 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGet(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetLong(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetObject(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGet(HInvoke* invoke) {
@@ -1098,14 +1087,13 @@
GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
}
-static void CreateIntIntIntIntToVoid(ArenaAllocator* arena,
+static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator,
const ArmInstructionSetFeatures& features,
DataType::Type type,
bool is_volatile,
HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -1126,39 +1114,39 @@
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePut(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kInt32, /* is_volatile */ true, invoke);
+ allocator_, features_, DataType::Type::kInt32, /* is_volatile */ true, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObject(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kReference, /* is_volatile */ true, invoke);
+ allocator_, features_, DataType::Type::kReference, /* is_volatile */ true, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLong(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kInt64, /* is_volatile */ true, invoke);
+ allocator_, features_, DataType::Type::kInt64, /* is_volatile */ true, invoke);
}
static void GenUnsafePut(LocationSummary* locations,
@@ -1284,17 +1272,18 @@
codegen_);
}
-static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* arena,
+static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* allocator,
HInvoke* invoke,
DataType::Type type) {
bool can_call = kEmitCompilerReadBarrier &&
kUseBakerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -1427,7 +1416,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeCASInt(HInvoke* invoke) {
- CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke, DataType::Type::kInt32);
+ CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke, DataType::Type::kInt32);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeCASObject(HInvoke* invoke) {
// The only read barrier implementation supporting the
@@ -1436,7 +1425,7 @@
return;
}
- CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke, DataType::Type::kReference);
+ CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke, DataType::Type::kReference);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeCASInt(HInvoke* invoke) {
GenCas(invoke, DataType::Type::kInt32, codegen_);
@@ -1451,11 +1440,12 @@
void IntrinsicLocationsBuilderARMVIXL::VisitStringCompareTo(HInvoke* invoke) {
// The inputs plus one temp.
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- invoke->InputAt(1)->CanBeNull()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke,
+ invoke->InputAt(1)->CanBeNull()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
@@ -1733,9 +1723,8 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringEquals(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -1974,9 +1963,8 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOf(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
// best to align the inputs accordingly.
InvokeRuntimeCallingConventionARMVIXL calling_convention;
@@ -1994,9 +1982,8 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
// best to align the inputs accordingly.
InvokeRuntimeCallingConventionARMVIXL calling_convention;
@@ -2012,9 +1999,8 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromBytes(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
@@ -2037,9 +2023,8 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromChars(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
@@ -2059,9 +2044,8 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromString(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
locations->SetOut(LocationFrom(r0));
@@ -2571,7 +2555,7 @@
__ Bind(intrinsic_slow_path->GetExitLabel());
}
-static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
// If the graph is debuggable, all callee-saved floating-point registers are blocked by
// the code generator. Furthermore, the register allocator creates fixed live intervals
// for all caller-saved registers because we are doing a function call. As a result, if
@@ -2585,9 +2569,8 @@
DCHECK_EQ(invoke->InputAt(0)->GetType(), DataType::Type::kFloat64);
DCHECK_EQ(invoke->GetType(), DataType::Type::kFloat64);
- LocationSummary* const locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* const locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
const InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -2597,7 +2580,7 @@
locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1)));
}
-static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
// If the graph is debuggable, all callee-saved floating-point registers are blocked by
// the code generator. Furthermore, the register allocator creates fixed live intervals
// for all caller-saved registers because we are doing a function call. As a result, if
@@ -2612,9 +2595,8 @@
DCHECK_EQ(invoke->InputAt(1)->GetType(), DataType::Type::kFloat64);
DCHECK_EQ(invoke->GetType(), DataType::Type::kFloat64);
- LocationSummary* const locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* const locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
const InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -2669,7 +2651,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathCos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathCos(HInvoke* invoke) {
@@ -2677,7 +2659,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathSin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathSin(HInvoke* invoke) {
@@ -2685,7 +2667,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathAcos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathAcos(HInvoke* invoke) {
@@ -2693,7 +2675,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathAsin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathAsin(HInvoke* invoke) {
@@ -2701,7 +2683,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathAtan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathAtan(HInvoke* invoke) {
@@ -2709,7 +2691,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathCbrt(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathCbrt(HInvoke* invoke) {
@@ -2717,7 +2699,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathCosh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathCosh(HInvoke* invoke) {
@@ -2725,7 +2707,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathExp(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathExp(HInvoke* invoke) {
@@ -2733,7 +2715,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathExpm1(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathExpm1(HInvoke* invoke) {
@@ -2741,7 +2723,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathLog(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathLog(HInvoke* invoke) {
@@ -2749,7 +2731,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathLog10(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathLog10(HInvoke* invoke) {
@@ -2757,7 +2739,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathSinh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathSinh(HInvoke* invoke) {
@@ -2765,7 +2747,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathTan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathTan(HInvoke* invoke) {
@@ -2773,7 +2755,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathTanh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathTanh(HInvoke* invoke) {
@@ -2781,7 +2763,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathAtan2(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathAtan2(HInvoke* invoke) {
@@ -2789,7 +2771,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathHypot(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathHypot(HInvoke* invoke) {
@@ -2797,7 +2779,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathNextAfter(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathNextAfter(HInvoke* invoke) {
@@ -2805,7 +2787,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitIntegerReverse(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitIntegerReverse(HInvoke* invoke) {
@@ -2814,7 +2796,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitLongReverse(HInvoke* invoke) {
- CreateLongToLongLocationsWithOverlap(arena_, invoke);
+ CreateLongToLongLocationsWithOverlap(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitLongReverse(HInvoke* invoke) {
@@ -2831,7 +2813,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitIntegerReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitIntegerReverseBytes(HInvoke* invoke) {
@@ -2840,7 +2822,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitLongReverseBytes(HInvoke* invoke) {
- CreateLongToLongLocationsWithOverlap(arena_, invoke);
+ CreateLongToLongLocationsWithOverlap(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitLongReverseBytes(HInvoke* invoke) {
@@ -2857,7 +2839,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitShortReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitShortReverseBytes(HInvoke* invoke) {
@@ -2894,7 +2876,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitIntegerBitCount(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
invoke->GetLocations()->AddTemp(Location::RequiresFpuRegister());
}
@@ -2961,7 +2943,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitIntegerHighestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitIntegerHighestOneBit(HInvoke* invoke) {
@@ -2969,7 +2951,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitLongHighestOneBit(HInvoke* invoke) {
- CreateLongToLongLocationsWithOverlap(arena_, invoke);
+ CreateLongToLongLocationsWithOverlap(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitLongHighestOneBit(HInvoke* invoke) {
@@ -3026,7 +3008,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitIntegerLowestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitIntegerLowestOneBit(HInvoke* invoke) {
@@ -3034,7 +3016,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitLongLowestOneBit(HInvoke* invoke) {
- CreateLongToLongLocationsWithOverlap(arena_, invoke);
+ CreateLongToLongLocationsWithOverlap(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitLongLowestOneBit(HInvoke* invoke) {
@@ -3042,9 +3024,8 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -3170,7 +3151,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitFloatIsInfinite(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitFloatIsInfinite(HInvoke* invoke) {
@@ -3188,7 +3169,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitDoubleIsInfinite(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitDoubleIsInfinite(HInvoke* invoke) {
@@ -3215,7 +3196,7 @@
void IntrinsicLocationsBuilderARMVIXL::VisitMathCeil(HInvoke* invoke) {
if (features_.HasARMv8AInstructions()) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
}
@@ -3227,7 +3208,7 @@
void IntrinsicLocationsBuilderARMVIXL::VisitMathFloor(HInvoke* invoke) {
if (features_.HasARMv8AInstructions()) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
}
@@ -3309,9 +3290,8 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitThreadInterrupted(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
diff --git a/compiler/optimizing/intrinsics_arm_vixl.h b/compiler/optimizing/intrinsics_arm_vixl.h
index a4a2830..4f18ca3 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.h
+++ b/compiler/optimizing/intrinsics_arm_vixl.h
@@ -46,7 +46,7 @@
bool TryDispatch(HInvoke* invoke);
private:
- ArenaAllocator* arena_;
+ ArenaAllocator* allocator_;
CodeGenerator* codegen_;
ArmVIXLAssembler* assembler_;
const ArmInstructionSetFeatures& features_;
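Several of the VIXL hunks above preserve the same call-kind decision while renaming the allocator parameter: an intrinsic that may hit a read barrier on an object reference gets kCallOnSlowPath, everything else stays kNoCall. A simplified, runnable illustration of that choice; the flag value and enum here are assumptions for the demo, not the ART definitions.

#include <cstdio>

enum class CallKind { kNoCall, kCallOnSlowPath };

constexpr bool kEmitCompilerReadBarrier = true;  // assumed value for the demo

// Mirrors: can_call = kEmitCompilerReadBarrier && <intrinsic touches an object>.
CallKind ChooseCallKind(bool touches_object_reference) {
  const bool can_call = kEmitCompilerReadBarrier && touches_object_reference;
  return can_call ? CallKind::kCallOnSlowPath : CallKind::kNoCall;
}

int main() {
  std::printf("UnsafeGetObject -> %s\n",
              ChooseCallKind(true) == CallKind::kCallOnSlowPath ? "kCallOnSlowPath"
                                                                : "kNoCall");
  std::printf("UnsafeGetInt    -> %s\n",
              ChooseCallKind(false) == CallKind::kCallOnSlowPath ? "kCallOnSlowPath"
                                                                 : "kNoCall");
  return 0;
}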
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 8847256..5f2f71b 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -35,7 +35,7 @@
namespace mips {
IntrinsicLocationsBuilderMIPS::IntrinsicLocationsBuilderMIPS(CodeGeneratorMIPS* codegen)
- : codegen_(codegen), arena_(codegen->GetGraph()->GetArena()) {
+ : codegen_(codegen), allocator_(codegen->GetGraph()->GetAllocator()) {
}
MipsAssembler* IntrinsicCodeGeneratorMIPS::GetAssembler() {
@@ -43,7 +43,7 @@
}
ArenaAllocator* IntrinsicCodeGeneratorMIPS::GetAllocator() {
- return codegen_->GetGraph()->GetArena();
+ return codegen_->GetGraph()->GetAllocator();
}
inline bool IntrinsicCodeGeneratorMIPS::IsR2OrNewer() const {
@@ -152,10 +152,9 @@
#define __ assembler->
-static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
}
@@ -178,7 +177,7 @@
// long java.lang.Double.doubleToRawLongBits(double)
void IntrinsicLocationsBuilderMIPS::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
@@ -187,17 +186,16 @@
// int java.lang.Float.floatToRawIntBits(float)
void IntrinsicLocationsBuilderMIPS::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
-static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
}
@@ -220,7 +218,7 @@
// double java.lang.Double.longBitsToDouble(long)
void IntrinsicLocationsBuilderMIPS::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
@@ -229,19 +227,18 @@
// float java.lang.Float.intBitsToFloat(int)
void IntrinsicLocationsBuilderMIPS::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitFloatIntBitsToFloat(HInvoke* invoke) {
MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
-static void CreateIntToIntLocations(ArenaAllocator* arena,
+static void CreateIntToIntLocations(ArenaAllocator* allocator,
HInvoke* invoke,
Location::OutputOverlap overlaps = Location::kNoOutputOverlap) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), overlaps);
}
@@ -402,7 +399,7 @@
// int java.lang.Integer.reverseBytes(int)
void IntrinsicLocationsBuilderMIPS::VisitIntegerReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerReverseBytes(HInvoke* invoke) {
@@ -416,7 +413,7 @@
// long java.lang.Long.reverseBytes(long)
void IntrinsicLocationsBuilderMIPS::VisitLongReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitLongReverseBytes(HInvoke* invoke) {
@@ -430,7 +427,7 @@
// short java.lang.Short.reverseBytes(short)
void IntrinsicLocationsBuilderMIPS::VisitShortReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitShortReverseBytes(HInvoke* invoke) {
@@ -474,7 +471,7 @@
// int java.lang.Integer.numberOfLeadingZeros(int i)
void IntrinsicLocationsBuilderMIPS::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
@@ -483,7 +480,7 @@
// int java.lang.Long.numberOfLeadingZeros(long i)
void IntrinsicLocationsBuilderMIPS::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
@@ -561,7 +558,7 @@
// int java.lang.Integer.numberOfTrailingZeros(int i)
void IntrinsicLocationsBuilderMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke, Location::kOutputOverlap);
+ CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap);
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
@@ -570,7 +567,7 @@
// int java.lang.Long.numberOfTrailingZeros(long i)
void IntrinsicLocationsBuilderMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke, Location::kOutputOverlap);
+ CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap);
}
void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -579,7 +576,7 @@
// int java.lang.Integer.reverse(int)
void IntrinsicLocationsBuilderMIPS::VisitIntegerReverse(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerReverse(HInvoke* invoke) {
@@ -593,7 +590,7 @@
// long java.lang.Long.reverse(long)
void IntrinsicLocationsBuilderMIPS::VisitLongReverse(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitLongReverse(HInvoke* invoke) {
@@ -605,10 +602,9 @@
GetAssembler());
}
-static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
}
@@ -725,7 +721,7 @@
// int java.lang.Integer.bitCount(int)
void IntrinsicLocationsBuilderMIPS::VisitIntegerBitCount(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerBitCount(HInvoke* invoke) {
@@ -734,9 +730,8 @@
// int java.lang.Long.bitCount(long)
void IntrinsicLocationsBuilderMIPS::VisitLongBitCount(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
@@ -801,7 +796,7 @@
// double java.lang.Math.abs(double)
void IntrinsicLocationsBuilderMIPS::VisitMathAbsDouble(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathAbsDouble(HInvoke* invoke) {
@@ -810,7 +805,7 @@
// float java.lang.Math.abs(float)
void IntrinsicLocationsBuilderMIPS::VisitMathAbsFloat(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathAbsFloat(HInvoke* invoke) {
@@ -847,7 +842,7 @@
// int java.lang.Math.abs(int)
void IntrinsicLocationsBuilderMIPS::VisitMathAbsInt(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathAbsInt(HInvoke* invoke) {
@@ -856,7 +851,7 @@
// long java.lang.Math.abs(long)
void IntrinsicLocationsBuilderMIPS::VisitMathAbsLong(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathAbsLong(HInvoke* invoke) {
@@ -1026,10 +1021,9 @@
}
}
-static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kOutputOverlap);
@@ -1037,7 +1031,7 @@
// double java.lang.Math.min(double, double)
void IntrinsicLocationsBuilderMIPS::VisitMathMinDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathMinDoubleDouble(HInvoke* invoke) {
@@ -1050,7 +1044,7 @@
// float java.lang.Math.min(float, float)
void IntrinsicLocationsBuilderMIPS::VisitMathMinFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathMinFloatFloat(HInvoke* invoke) {
@@ -1063,7 +1057,7 @@
// double java.lang.Math.max(double, double)
void IntrinsicLocationsBuilderMIPS::VisitMathMaxDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathMaxDoubleDouble(HInvoke* invoke) {
@@ -1076,7 +1070,7 @@
// float java.lang.Math.max(float, float)
void IntrinsicLocationsBuilderMIPS::VisitMathMaxFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathMaxFloatFloat(HInvoke* invoke) {
@@ -1087,10 +1081,9 @@
GetAssembler());
}
-static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -1267,7 +1260,7 @@
// int java.lang.Math.min(int, int)
void IntrinsicLocationsBuilderMIPS::VisitMathMinIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathMinIntInt(HInvoke* invoke) {
@@ -1280,7 +1273,7 @@
// long java.lang.Math.min(long, long)
void IntrinsicLocationsBuilderMIPS::VisitMathMinLongLong(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathMinLongLong(HInvoke* invoke) {
@@ -1293,7 +1286,7 @@
// int java.lang.Math.max(int, int)
void IntrinsicLocationsBuilderMIPS::VisitMathMaxIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathMaxIntInt(HInvoke* invoke) {
@@ -1306,7 +1299,7 @@
// long java.lang.Math.max(long, long)
void IntrinsicLocationsBuilderMIPS::VisitMathMaxLongLong(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathMaxLongLong(HInvoke* invoke) {
@@ -1319,7 +1312,7 @@
// double java.lang.Math.sqrt(double)
void IntrinsicLocationsBuilderMIPS::VisitMathSqrt(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathSqrt(HInvoke* invoke) {
@@ -1333,7 +1326,7 @@
// byte libcore.io.Memory.peekByte(long address)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekByte(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekByte(HInvoke* invoke) {
@@ -1346,7 +1339,7 @@
// short libcore.io.Memory.peekShort(long address)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekShortNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekShortNative(HInvoke* invoke) {
@@ -1378,7 +1371,7 @@
// int libcore.io.Memory.peekInt(long address)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekIntNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke, Location::kOutputOverlap);
+ CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap);
}
void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekIntNative(HInvoke* invoke) {
@@ -1396,7 +1389,7 @@
// long libcore.io.Memory.peekLong(long address)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekLongNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke, Location::kOutputOverlap);
+ CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap);
}
void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekLongNative(HInvoke* invoke) {
@@ -1416,17 +1409,16 @@
}
}
-static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
}
// void libcore.io.Memory.pokeByte(long address, byte value)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeByte(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeByte(HInvoke* invoke) {
@@ -1439,7 +1431,7 @@
// void libcore.io.Memory.pokeShort(long address, short value)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeShortNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeShortNative(HInvoke* invoke) {
@@ -1461,7 +1453,7 @@
// void libcore.io.Memory.pokeInt(long address, int value)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeIntNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeIntNative(HInvoke* invoke) {
@@ -1479,7 +1471,7 @@
// void libcore.io.Memory.pokeLong(long address, long value)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeLongNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeLongNative(HInvoke* invoke) {
@@ -1501,9 +1493,8 @@
// Thread java.lang.Thread.currentThread()
void IntrinsicLocationsBuilderMIPS::VisitThreadCurrentThread(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
@@ -1517,17 +1508,18 @@
Thread::PeerOffset<kMipsPointerSize>().Int32Value());
}
-static void CreateIntIntIntToIntLocations(ArenaAllocator* arena,
+static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator,
HInvoke* invoke,
DataType::Type type) {
bool can_call = kEmitCompilerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
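
For the Unsafe getters, the call kind above is not constant: a read-barrier build may need the slow path for the object-typed variants. A hedged sketch of just that decision (the enum and predicate names are stand-ins for ART's):

enum class CallKind { kNoCall, kCallOnSlowPath };

// Only object-typed Unsafe gets can trip the read barrier's slow path;
// every other getter stays a leaf (no-call) intrinsic.
CallKind ChooseUnsafeGetCallKind(bool emit_read_barrier, bool gets_object) {
  const bool can_call = emit_read_barrier && gets_object;
  return can_call ? CallKind::kCallOnSlowPath : CallKind::kNoCall;
}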
@@ -1657,7 +1649,7 @@
// int sun.misc.Unsafe.getInt(Object o, long offset)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeGet(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGet(HInvoke* invoke) {
@@ -1666,7 +1658,7 @@
// int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) {
@@ -1675,7 +1667,7 @@
// long sun.misc.Unsafe.getLong(Object o, long offset)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetLong(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetLong(HInvoke* invoke) {
@@ -1684,7 +1676,7 @@
// Object sun.misc.Unsafe.getObject(Object o, long offset)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObject(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObject(HInvoke* invoke) {
@@ -1693,17 +1685,16 @@
// Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, IsR6(), codegen_);
}
-static void CreateIntIntIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -1774,7 +1765,7 @@
// void sun.misc.Unsafe.putInt(Object o, long offset, int x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafePut(HInvoke* invoke) {
- CreateIntIntIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafePut(HInvoke* invoke) {
@@ -1788,7 +1779,7 @@
// void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafePutOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutOrdered(HInvoke* invoke) {
@@ -1802,7 +1793,7 @@
// void sun.misc.Unsafe.putIntVolatile(Object o, long offset, int x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafePutVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutVolatile(HInvoke* invoke) {
@@ -1816,7 +1807,7 @@
// void sun.misc.Unsafe.putObject(Object o, long offset, Object x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObject(HInvoke* invoke) {
- CreateIntIntIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObject(HInvoke* invoke) {
@@ -1830,7 +1821,7 @@
// void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
@@ -1844,7 +1835,7 @@
// void sun.misc.Unsafe.putObjectVolatile(Object o, long offset, Object x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
@@ -1858,7 +1849,7 @@
// void sun.misc.Unsafe.putLong(Object o, long offset, long x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLong(HInvoke* invoke) {
- CreateIntIntIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLong(HInvoke* invoke) {
@@ -1872,7 +1863,7 @@
// void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) {
@@ -1884,15 +1875,16 @@
codegen_);
}
-static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* allocator, HInvoke* invoke) {
bool can_call = kEmitCompilerReadBarrier &&
kUseBakerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -2016,7 +2008,7 @@
// boolean sun.misc.Unsafe.compareAndSwapInt(Object o, long offset, int expected, int x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeCASInt(HInvoke* invoke) {
- CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
+ CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeCASInt(HInvoke* invoke) {
@@ -2031,7 +2023,7 @@
return;
}
- CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
+ CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeCASObject(HInvoke* invoke) {
@@ -2044,9 +2036,8 @@
// int java.lang.String.compareTo(String anotherString)
void IntrinsicLocationsBuilderMIPS::VisitStringCompareTo(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -2071,9 +2062,8 @@
// boolean java.lang.String.equals(Object anObject)
void IntrinsicLocationsBuilderMIPS::VisitStringEquals(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
@@ -2248,9 +2238,8 @@
// int java.lang.String.indexOf(int ch)
void IntrinsicLocationsBuilderMIPS::VisitStringIndexOf(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime
// calling convention. So it's best to align the inputs accordingly.
InvokeRuntimeCallingConvention calling_convention;
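
The "align the inputs accordingly" comment above means the builder pins each input to the fixed argument registers of the runtime calling convention rather than letting the register allocator choose. A rough sketch, with illustrative register numbering rather than the actual MIPS o32 assignment:

struct CallingConventionLike {
  // a0..a3 on MIPS would be registers 4..7; purely illustrative here.
  int GetRegisterAt(int i) const { return 4 + i; }
};

// Mirrors the SetInAt(i, Location::RegisterLocation(...)) calls the
// hand-crafted stub expects: input i must land in argument register i.
void PinInputsToConvention(int* in_regs, int num_inputs) {
  CallingConventionLike cc;
  for (int i = 0; i < num_inputs; ++i) {
    in_regs[i] = cc.GetRegisterAt(i);
  }
}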
@@ -2273,9 +2262,8 @@
// int java.lang.String.indexOf(int ch, int fromIndex)
void IntrinsicLocationsBuilderMIPS::VisitStringIndexOfAfter(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime
// calling convention. So it's best to align the inputs accordingly.
InvokeRuntimeCallingConvention calling_convention;
@@ -2299,9 +2287,8 @@
// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromBytes(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -2325,9 +2312,8 @@
// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromChars(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -2348,9 +2334,8 @@
// java.lang.StringFactory.newStringFromString(String toCopy)
void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromString(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32);
@@ -2411,7 +2396,7 @@
// boolean java.lang.Float.isInfinite(float)
void IntrinsicLocationsBuilderMIPS::VisitFloatIsInfinite(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitFloatIsInfinite(HInvoke* invoke) {
@@ -2420,7 +2405,7 @@
// boolean java.lang.Double.isInfinite(double)
void IntrinsicLocationsBuilderMIPS::VisitDoubleIsInfinite(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitDoubleIsInfinite(HInvoke* invoke) {
@@ -2476,7 +2461,7 @@
// int java.lang.Integer.highestOneBit(int)
void IntrinsicLocationsBuilderMIPS::VisitIntegerHighestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerHighestOneBit(HInvoke* invoke) {
@@ -2485,7 +2470,7 @@
// long java.lang.Long.highestOneBit(long)
void IntrinsicLocationsBuilderMIPS::VisitLongHighestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke, Location::kOutputOverlap);
+ CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap);
}
void IntrinsicCodeGeneratorMIPS::VisitLongHighestOneBit(HInvoke* invoke) {
@@ -2524,7 +2509,7 @@
// int java.lang.Integer.lowestOneBit(int)
void IntrinsicLocationsBuilderMIPS::VisitIntegerLowestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerLowestOneBit(HInvoke* invoke) {
@@ -2533,7 +2518,7 @@
// long java.lang.Long.lowestOneBit(long)
void IntrinsicLocationsBuilderMIPS::VisitLongLowestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitLongLowestOneBit(HInvoke* invoke) {
@@ -2542,9 +2527,8 @@
// int java.lang.Math.round(float)
void IntrinsicLocationsBuilderMIPS::VisitMathRoundFloat(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->AddTemp(Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
@@ -2667,9 +2651,8 @@
// void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin)
void IntrinsicLocationsBuilderMIPS::VisitStringGetCharsNoCheck(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -2757,20 +2740,18 @@
__ Bind(&done);
}
-static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kFloat64));
}
-static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
@@ -2804,7 +2785,7 @@
// static double java.lang.Math.cos(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathCos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathCos(HInvoke* invoke) {
@@ -2813,7 +2794,7 @@
// static double java.lang.Math.sin(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathSin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathSin(HInvoke* invoke) {
@@ -2822,7 +2803,7 @@
// static double java.lang.Math.acos(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathAcos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathAcos(HInvoke* invoke) {
@@ -2831,7 +2812,7 @@
// static double java.lang.Math.asin(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathAsin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathAsin(HInvoke* invoke) {
@@ -2840,7 +2821,7 @@
// static double java.lang.Math.atan(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathAtan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathAtan(HInvoke* invoke) {
@@ -2849,7 +2830,7 @@
// static double java.lang.Math.atan2(double y, double x)
void IntrinsicLocationsBuilderMIPS::VisitMathAtan2(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathAtan2(HInvoke* invoke) {
@@ -2858,7 +2839,7 @@
// static double java.lang.Math.cbrt(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathCbrt(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathCbrt(HInvoke* invoke) {
@@ -2867,7 +2848,7 @@
// static double java.lang.Math.cosh(double x)
void IntrinsicLocationsBuilderMIPS::VisitMathCosh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathCosh(HInvoke* invoke) {
@@ -2876,7 +2857,7 @@
// static double java.lang.Math.exp(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathExp(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathExp(HInvoke* invoke) {
@@ -2885,7 +2866,7 @@
// static double java.lang.Math.expm1(double x)
void IntrinsicLocationsBuilderMIPS::VisitMathExpm1(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathExpm1(HInvoke* invoke) {
@@ -2894,7 +2875,7 @@
// static double java.lang.Math.hypot(double x, double y)
void IntrinsicLocationsBuilderMIPS::VisitMathHypot(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathHypot(HInvoke* invoke) {
@@ -2903,7 +2884,7 @@
// static double java.lang.Math.log(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathLog(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathLog(HInvoke* invoke) {
@@ -2912,7 +2893,7 @@
// static double java.lang.Math.log10(double x)
void IntrinsicLocationsBuilderMIPS::VisitMathLog10(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathLog10(HInvoke* invoke) {
@@ -2921,7 +2902,7 @@
// static double java.lang.Math.nextAfter(double start, double direction)
void IntrinsicLocationsBuilderMIPS::VisitMathNextAfter(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathNextAfter(HInvoke* invoke) {
@@ -2930,7 +2911,7 @@
// static double java.lang.Math.sinh(double x)
void IntrinsicLocationsBuilderMIPS::VisitMathSinh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathSinh(HInvoke* invoke) {
@@ -2939,7 +2920,7 @@
// static double java.lang.Math.tan(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathTan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathTan(HInvoke* invoke) {
@@ -2948,7 +2929,7 @@
// static double java.lang.Math.tanh(double x)
void IntrinsicLocationsBuilderMIPS::VisitMathTanh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathTanh(HInvoke* invoke) {
@@ -2982,7 +2963,7 @@
// Okay, it is safe to generate inline code.
LocationSummary* locations =
- new (arena_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
// arraycopy(Object src, int srcPos, Object dest, int destPos, int length).
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
diff --git a/compiler/optimizing/intrinsics_mips.h b/compiler/optimizing/intrinsics_mips.h
index 05d1aa2..afd9548 100644
--- a/compiler/optimizing/intrinsics_mips.h
+++ b/compiler/optimizing/intrinsics_mips.h
@@ -50,7 +50,7 @@
private:
CodeGeneratorMIPS* codegen_;
- ArenaAllocator* arena_;
+ ArenaAllocator* allocator_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS);
};
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index d0234d8..8d5be80 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -35,7 +35,7 @@
namespace mips64 {
IntrinsicLocationsBuilderMIPS64::IntrinsicLocationsBuilderMIPS64(CodeGeneratorMIPS64* codegen)
- : codegen_(codegen), arena_(codegen->GetGraph()->GetArena()) {
+ : codegen_(codegen), allocator_(codegen->GetGraph()->GetAllocator()) {
}
Mips64Assembler* IntrinsicCodeGeneratorMIPS64::GetAssembler() {
@@ -43,7 +43,7 @@
}
ArenaAllocator* IntrinsicCodeGeneratorMIPS64::GetAllocator() {
- return codegen_->GetGraph()->GetArena();
+ return codegen_->GetGraph()->GetAllocator();
}
#define __ codegen->GetAssembler()->
@@ -141,10 +141,9 @@
#define __ assembler->
-static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
}
@@ -162,7 +161,7 @@
// long java.lang.Double.doubleToRawLongBits(double)
void IntrinsicLocationsBuilderMIPS64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
@@ -171,17 +170,16 @@
// int java.lang.Float.floatToRawIntBits(float)
void IntrinsicLocationsBuilderMIPS64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
-static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
}
@@ -199,7 +197,7 @@
// double java.lang.Double.longBitsToDouble(long)
void IntrinsicLocationsBuilderMIPS64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
@@ -208,17 +206,16 @@
// float java.lang.Float.intBitsToFloat(int)
void IntrinsicLocationsBuilderMIPS64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
-static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -250,7 +247,7 @@
// int java.lang.Integer.reverseBytes(int)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerReverseBytes(HInvoke* invoke) {
@@ -259,7 +256,7 @@
// long java.lang.Long.reverseBytes(long)
void IntrinsicLocationsBuilderMIPS64::VisitLongReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitLongReverseBytes(HInvoke* invoke) {
@@ -268,7 +265,7 @@
// short java.lang.Short.reverseBytes(short)
void IntrinsicLocationsBuilderMIPS64::VisitShortReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitShortReverseBytes(HInvoke* invoke) {
@@ -290,7 +287,7 @@
// int java.lang.Integer.numberOfLeadingZeros(int i)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
@@ -299,7 +296,7 @@
// int java.lang.Long.numberOfLeadingZeros(long i)
void IntrinsicLocationsBuilderMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
@@ -327,7 +324,7 @@
// int java.lang.Integer.numberOfTrailingZeros(int i)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
@@ -336,7 +333,7 @@
// int java.lang.Long.numberOfTrailingZeros(long i)
void IntrinsicLocationsBuilderMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -364,7 +361,7 @@
// int java.lang.Integer.reverse(int)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerReverse(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerReverse(HInvoke* invoke) {
@@ -373,17 +370,16 @@
// long java.lang.Long.reverse(long)
void IntrinsicLocationsBuilderMIPS64::VisitLongReverse(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitLongReverse(HInvoke* invoke) {
GenReverse(invoke->GetLocations(), DataType::Type::kInt64, GetAssembler());
}
-static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
}
@@ -458,7 +454,7 @@
// int java.lang.Integer.bitCount(int)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerBitCount(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerBitCount(HInvoke* invoke) {
@@ -467,7 +463,7 @@
// int java.lang.Long.bitCount(long)
void IntrinsicLocationsBuilderMIPS64::VisitLongBitCount(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitLongBitCount(HInvoke* invoke) {
@@ -487,7 +483,7 @@
// double java.lang.Math.abs(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathAbsDouble(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathAbsDouble(HInvoke* invoke) {
@@ -496,17 +492,16 @@
// float java.lang.Math.abs(float)
void IntrinsicLocationsBuilderMIPS64::VisitMathAbsFloat(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathAbsFloat(HInvoke* invoke) {
MathAbsFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
-static void CreateIntToInt(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToInt(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -528,7 +523,7 @@
// int java.lang.Math.abs(int)
void IntrinsicLocationsBuilderMIPS64::VisitMathAbsInt(HInvoke* invoke) {
- CreateIntToInt(arena_, invoke);
+ CreateIntToInt(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathAbsInt(HInvoke* invoke) {
@@ -537,7 +532,7 @@
// long java.lang.Math.abs(long)
void IntrinsicLocationsBuilderMIPS64::VisitMathAbsLong(HInvoke* invoke) {
- CreateIntToInt(arena_, invoke);
+ CreateIntToInt(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathAbsLong(HInvoke* invoke) {
@@ -613,10 +608,9 @@
__ Bind(&done);
}
-static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
@@ -624,7 +618,7 @@
// double java.lang.Math.min(double, double)
void IntrinsicLocationsBuilderMIPS64::VisitMathMinDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathMinDoubleDouble(HInvoke* invoke) {
@@ -633,7 +627,7 @@
// float java.lang.Math.min(float, float)
void IntrinsicLocationsBuilderMIPS64::VisitMathMinFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathMinFloatFloat(HInvoke* invoke) {
@@ -642,7 +636,7 @@
// double java.lang.Math.max(double, double)
void IntrinsicLocationsBuilderMIPS64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
@@ -651,7 +645,7 @@
// float java.lang.Math.max(float, float)
void IntrinsicLocationsBuilderMIPS64::VisitMathMaxFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathMaxFloatFloat(HInvoke* invoke) {
@@ -716,10 +710,9 @@
}
}
-static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -727,7 +720,7 @@
// int java.lang.Math.min(int, int)
void IntrinsicLocationsBuilderMIPS64::VisitMathMinIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathMinIntInt(HInvoke* invoke) {
@@ -736,7 +729,7 @@
// long java.lang.Math.min(long, long)
void IntrinsicLocationsBuilderMIPS64::VisitMathMinLongLong(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathMinLongLong(HInvoke* invoke) {
@@ -745,7 +738,7 @@
// int java.lang.Math.max(int, int)
void IntrinsicLocationsBuilderMIPS64::VisitMathMaxIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathMaxIntInt(HInvoke* invoke) {
@@ -754,7 +747,7 @@
// long java.lang.Math.max(long, long)
void IntrinsicLocationsBuilderMIPS64::VisitMathMaxLongLong(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathMaxLongLong(HInvoke* invoke) {
@@ -763,7 +756,7 @@
// double java.lang.Math.sqrt(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathSqrt(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathSqrt(HInvoke* invoke) {
@@ -775,19 +768,18 @@
__ SqrtD(out, in);
}
-static void CreateFPToFP(ArenaAllocator* arena,
+static void CreateFPToFP(ArenaAllocator* allocator,
HInvoke* invoke,
Location::OutputOverlap overlaps = Location::kOutputOverlap) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), overlaps);
}
// double java.lang.Math.rint(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathRint(HInvoke* invoke) {
- CreateFPToFP(arena_, invoke, Location::kNoOutputOverlap);
+ CreateFPToFP(allocator_, invoke, Location::kNoOutputOverlap);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathRint(HInvoke* invoke) {
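
CreateFPToFP above defaults its overlaps parameter to kOutputOverlap, so only callers that know the result may safely share a register with a still-live input (Math.rint here) opt out. A hedged sketch of that default-argument shape (names are stand-ins):

enum class Overlap { kOutputOverlap, kNoOutputOverlap };

// The conservative default keeps output and inputs in distinct registers;
// a caller passes kNoOutputOverlap only when aliasing is provably safe.
void CreateFPToFPLike(Overlap overlaps = Overlap::kOutputOverlap) {
  (void)overlaps;  // would be forwarded to SetOut(..., overlaps)
}

void VisitRintLike()  { CreateFPToFPLike(Overlap::kNoOutputOverlap); }
void VisitFloorLike() { CreateFPToFPLike(); }  // keeps the default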
@@ -801,7 +793,7 @@
// double java.lang.Math.floor(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathFloor(HInvoke* invoke) {
- CreateFPToFP(arena_, invoke);
+ CreateFPToFP(allocator_, invoke);
}
const constexpr uint16_t kFPLeaveUnchanged = kPositiveZero |
@@ -878,7 +870,7 @@
// double java.lang.Math.ceil(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathCeil(HInvoke* invoke) {
- CreateFPToFP(arena_, invoke);
+ CreateFPToFP(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathCeil(HInvoke* invoke) {
@@ -961,9 +953,8 @@
// int java.lang.Math.round(float)
void IntrinsicLocationsBuilderMIPS64::VisitMathRoundFloat(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->AddTemp(Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
@@ -975,9 +966,8 @@
// long java.lang.Math.round(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathRoundDouble(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->AddTemp(Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
@@ -989,7 +979,7 @@
// byte libcore.io.Memory.peekByte(long address)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPeekByte(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekByte(HInvoke* invoke) {
@@ -1002,7 +992,7 @@
// short libcore.io.Memory.peekShort(long address)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPeekShortNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekShortNative(HInvoke* invoke) {
@@ -1015,7 +1005,7 @@
// int libcore.io.Memory.peekInt(long address)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPeekIntNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekIntNative(HInvoke* invoke) {
@@ -1028,7 +1018,7 @@
// long libcore.io.Memory.peekLong(long address)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPeekLongNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekLongNative(HInvoke* invoke) {
@@ -1039,17 +1029,16 @@
__ Ld(out, adr, 0);
}
-static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
}
// void libcore.io.Memory.pokeByte(long address, byte value)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPokeByte(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeByte(HInvoke* invoke) {
@@ -1062,7 +1051,7 @@
// void libcore.io.Memory.pokeShort(long address, short value)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPokeShortNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeShortNative(HInvoke* invoke) {
@@ -1075,7 +1064,7 @@
// void libcore.io.Memory.pokeInt(long address, int value)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPokeIntNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeIntNative(HInvoke* invoke) {
@@ -1088,7 +1077,7 @@
// void libcore.io.Memory.pokeLong(long address, long value)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPokeLongNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeLongNative(HInvoke* invoke) {
@@ -1101,9 +1090,8 @@
// Thread java.lang.Thread.currentThread()
void IntrinsicLocationsBuilderMIPS64::VisitThreadCurrentThread(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
@@ -1117,17 +1105,18 @@
Thread::PeerOffset<kMips64PointerSize>().Int32Value());
}
-static void CreateIntIntIntToIntLocations(ArenaAllocator* arena,
+static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator,
HInvoke* invoke,
DataType::Type type) {
bool can_call = kEmitCompilerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -1227,7 +1216,7 @@
// int sun.misc.Unsafe.getInt(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGet(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGet(HInvoke* invoke) {
@@ -1236,7 +1225,7 @@
// int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
@@ -1245,7 +1234,7 @@
// long sun.misc.Unsafe.getLong(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
@@ -1254,7 +1243,7 @@
// long sun.misc.Unsafe.getLongVolatile(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
@@ -1263,7 +1252,7 @@
// Object sun.misc.Unsafe.getObject(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
@@ -1272,17 +1261,16 @@
// Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
}
-static void CreateIntIntIntIntToVoid(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -1341,7 +1329,7 @@
// void sun.misc.Unsafe.putInt(Object o, long offset, int x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePut(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePut(HInvoke* invoke) {
@@ -1354,7 +1342,7 @@
// void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) {
@@ -1367,7 +1355,7 @@
// void sun.misc.Unsafe.putIntVolatile(Object o, long offset, int x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) {
@@ -1380,7 +1368,7 @@
// void sun.misc.Unsafe.putObject(Object o, long offset, Object x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObject(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObject(HInvoke* invoke) {
@@ -1393,7 +1381,7 @@
// void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
@@ -1406,7 +1394,7 @@
// void sun.misc.Unsafe.putObjectVolatile(Object o, long offset, Object x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
@@ -1419,7 +1407,7 @@
// void sun.misc.Unsafe.putLong(Object o, long offset, long x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLong(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLong(HInvoke* invoke) {
@@ -1432,7 +1420,7 @@
// void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
@@ -1445,7 +1433,7 @@
// void sun.misc.Unsafe.putLongVolatile(Object o, long offset, long x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
@@ -1456,15 +1444,16 @@
codegen_);
}
-static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* allocator, HInvoke* invoke) {
bool can_call = kEmitCompilerReadBarrier &&
kUseBakerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -1583,7 +1572,7 @@
// boolean sun.misc.Unsafe.compareAndSwapInt(Object o, long offset, int expected, int x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASInt(HInvoke* invoke) {
- CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
+ CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASInt(HInvoke* invoke) {
@@ -1592,7 +1581,7 @@
// boolean sun.misc.Unsafe.compareAndSwapLong(Object o, long offset, long expected, long x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASLong(HInvoke* invoke) {
- CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
+ CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASLong(HInvoke* invoke) {
@@ -1607,7 +1596,7 @@
return;
}
- CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
+ CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASObject(HInvoke* invoke) {
@@ -1620,9 +1609,8 @@
// int java.lang.String.compareTo(String anotherString)
void IntrinsicLocationsBuilderMIPS64::VisitStringCompareTo(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1648,9 +1636,8 @@
// boolean java.lang.String.equals(Object anObject)
void IntrinsicLocationsBuilderMIPS64::VisitStringEquals(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
@@ -1814,9 +1801,8 @@
// int java.lang.String.indexOf(int ch)
void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOf(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime
// calling convention. So it's best to align the inputs accordingly.
InvokeRuntimeCallingConvention calling_convention;
@@ -1835,9 +1821,8 @@
// int java.lang.String.indexOf(int ch, int fromIndex)
void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime
// calling convention. So it's best to align the inputs accordingly.
InvokeRuntimeCallingConvention calling_convention;
@@ -1855,9 +1840,8 @@
// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1883,9 +1867,8 @@
// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromChars(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1907,9 +1890,8 @@
// java.lang.StringFactory.newStringFromString(String toCopy)
void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromString(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32);
@@ -1948,7 +1930,7 @@
// boolean java.lang.Float.isInfinite(float)
void IntrinsicLocationsBuilderMIPS64::VisitFloatIsInfinite(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitFloatIsInfinite(HInvoke* invoke) {
@@ -1957,7 +1939,7 @@
// boolean java.lang.Double.isInfinite(double)
void IntrinsicLocationsBuilderMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) {
@@ -1966,9 +1948,8 @@
// void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin)
void IntrinsicLocationsBuilderMIPS64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -2083,7 +2064,7 @@
// Okay, it is safe to generate inline code.
LocationSummary* locations =
- new (arena_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
// arraycopy(Object src, int srcPos, Object dest, int destPos, int length).
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
@@ -2277,7 +2258,7 @@
// int java.lang.Integer.highestOneBit(int)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerHighestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerHighestOneBit(HInvoke* invoke) {
@@ -2286,7 +2267,7 @@
// long java.lang.Long.highestOneBit(long)
void IntrinsicLocationsBuilderMIPS64::VisitLongHighestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitLongHighestOneBit(HInvoke* invoke) {
@@ -2311,7 +2292,7 @@
// int java.lang.Integer.lowestOneBit(int)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerLowestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerLowestOneBit(HInvoke* invoke) {
@@ -2320,27 +2301,25 @@
// long java.lang.Long.lowestOneBit(long)
void IntrinsicLocationsBuilderMIPS64::VisitLongLowestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitLongLowestOneBit(HInvoke* invoke) {
GenLowestOneBit(invoke->GetLocations(), DataType::Type::kInt64, GetAssembler());
}
-static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kFloat64));
}
-static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
@@ -2376,7 +2355,7 @@
// static double java.lang.Math.cos(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathCos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathCos(HInvoke* invoke) {
@@ -2385,7 +2364,7 @@
// static double java.lang.Math.sin(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathSin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathSin(HInvoke* invoke) {
@@ -2394,7 +2373,7 @@
// static double java.lang.Math.acos(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathAcos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathAcos(HInvoke* invoke) {
@@ -2403,7 +2382,7 @@
// static double java.lang.Math.asin(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathAsin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathAsin(HInvoke* invoke) {
@@ -2412,7 +2391,7 @@
// static double java.lang.Math.atan(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathAtan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathAtan(HInvoke* invoke) {
@@ -2421,7 +2400,7 @@
// static double java.lang.Math.atan2(double y, double x)
void IntrinsicLocationsBuilderMIPS64::VisitMathAtan2(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathAtan2(HInvoke* invoke) {
@@ -2430,7 +2409,7 @@
// static double java.lang.Math.cbrt(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathCbrt(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathCbrt(HInvoke* invoke) {
@@ -2439,7 +2418,7 @@
// static double java.lang.Math.cosh(double x)
void IntrinsicLocationsBuilderMIPS64::VisitMathCosh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathCosh(HInvoke* invoke) {
@@ -2448,7 +2427,7 @@
// static double java.lang.Math.exp(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathExp(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathExp(HInvoke* invoke) {
@@ -2457,7 +2436,7 @@
// static double java.lang.Math.expm1(double x)
void IntrinsicLocationsBuilderMIPS64::VisitMathExpm1(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathExpm1(HInvoke* invoke) {
@@ -2466,7 +2445,7 @@
// static double java.lang.Math.hypot(double x, double y)
void IntrinsicLocationsBuilderMIPS64::VisitMathHypot(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathHypot(HInvoke* invoke) {
@@ -2475,7 +2454,7 @@
// static double java.lang.Math.log(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathLog(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathLog(HInvoke* invoke) {
@@ -2484,7 +2463,7 @@
// static double java.lang.Math.log10(double x)
void IntrinsicLocationsBuilderMIPS64::VisitMathLog10(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathLog10(HInvoke* invoke) {
@@ -2493,7 +2472,7 @@
// static double java.lang.Math.nextAfter(double start, double direction)
void IntrinsicLocationsBuilderMIPS64::VisitMathNextAfter(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathNextAfter(HInvoke* invoke) {
@@ -2502,7 +2481,7 @@
// static double java.lang.Math.sinh(double x)
void IntrinsicLocationsBuilderMIPS64::VisitMathSinh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathSinh(HInvoke* invoke) {
@@ -2511,7 +2490,7 @@
// static double java.lang.Math.tan(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathTan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathTan(HInvoke* invoke) {
@@ -2520,7 +2499,7 @@
// static double java.lang.Math.tanh(double x)
void IntrinsicLocationsBuilderMIPS64::VisitMathTanh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathTanh(HInvoke* invoke) {
diff --git a/compiler/optimizing/intrinsics_mips64.h b/compiler/optimizing/intrinsics_mips64.h
index 6880a25..6085c7b 100644
--- a/compiler/optimizing/intrinsics_mips64.h
+++ b/compiler/optimizing/intrinsics_mips64.h
@@ -50,7 +50,7 @@
private:
CodeGeneratorMIPS64* codegen_;
- ArenaAllocator* arena_;
+ ArenaAllocator* allocator_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS64);
};
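
An aside for readers skimming these mechanical hunks: every "new (allocator_) LocationSummary(...)" above is placement-new routed into the compiler's arena, so location summaries are carved out of bulk-allocated memory and never individually freed. The standalone sketch below shows the operator-new overload that makes that spelling work; ArenaAllocator and LocationSummary here are simplified stand-ins, not ART's real classes.

    #include <cstddef>
    #include <new>
    #include <vector>

    class ArenaAllocator {
     public:
      ArenaAllocator() = default;
      ~ArenaAllocator() {
        // Wholesale reclamation: arena objects are never deleted one by one.
        for (void* p : allocations_) {
          ::operator delete(p);
        }
      }

      void* Alloc(std::size_t bytes) {
        void* p = ::operator new(bytes);
        allocations_.push_back(p);
        return p;
      }

     private:
      std::vector<void*> allocations_;
    };

    // This overload is what makes "new (allocator) T(...)" route through
    // the arena instead of the global heap.
    void* operator new(std::size_t bytes, ArenaAllocator* allocator) {
      return allocator->Alloc(bytes);
    }
    // Matching form, used only if a constructor throws; the arena still
    // owns the storage, so there is nothing to do here.
    void operator delete(void* ptr, ArenaAllocator* allocator) noexcept {
      (void)ptr;
      (void)allocator;
    }

    struct LocationSummary {
      enum CallKind { kNoCall, kCallOnSlowPath, kCallOnMainOnly };
      LocationSummary(CallKind call_kind, bool intrinsified)
          : call_kind_(call_kind), intrinsified_(intrinsified) {}
      CallKind call_kind_;
      bool intrinsified_;
    };

    int main() {
      ArenaAllocator arena;
      // Same shape as the calls in this patch; note the absence of any
      // matching delete -- the arena reclaims the memory wholesale.
      LocationSummary* locations =
          new (&arena) LocationSummary(LocationSummary::kNoCall, /* intrinsified */ true);
      return locations->call_kind_ == LocationSummary::kNoCall ? 0 : 1;
    }

Because reclamation is wholesale, the arena_ to allocator_ rename carries no ownership or lifetime change; the patch is cosmetic renaming plus reflowing of the constructor calls.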
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index a591622..8b389ba 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -46,7 +46,7 @@
static constexpr int32_t kFloatNaN = INT32_C(0x7FC00000);
IntrinsicLocationsBuilderX86::IntrinsicLocationsBuilderX86(CodeGeneratorX86* codegen)
- : arena_(codegen->GetGraph()->GetArena()),
+ : allocator_(codegen->GetGraph()->GetAllocator()),
codegen_(codegen) {
}
@@ -56,7 +56,7 @@
}
ArenaAllocator* IntrinsicCodeGeneratorX86::GetAllocator() {
- return codegen_->GetGraph()->GetArena();
+ return codegen_->GetGraph()->GetAllocator();
}
bool IntrinsicLocationsBuilderX86::TryDispatch(HInvoke* invoke) {
@@ -175,10 +175,9 @@
#define __ assembler->
-static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke, bool is64bit) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is64bit) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
if (is64bit) {
@@ -186,10 +185,9 @@
}
}
-static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke, bool is64bit) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is64bit) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
if (is64bit) {
@@ -230,10 +228,10 @@
}
void IntrinsicLocationsBuilderX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke, /* is64bit */ true);
+ CreateFPToIntLocations(allocator_, invoke, /* is64bit */ true);
}
void IntrinsicLocationsBuilderX86::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke, /* is64bit */ true);
+ CreateIntToFPLocations(allocator_, invoke, /* is64bit */ true);
}
void IntrinsicCodeGeneratorX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
@@ -244,10 +242,10 @@
}
void IntrinsicLocationsBuilderX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke, /* is64bit */ false);
+ CreateFPToIntLocations(allocator_, invoke, /* is64bit */ false);
}
void IntrinsicLocationsBuilderX86::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke, /* is64bit */ false);
+ CreateIntToFPLocations(allocator_, invoke, /* is64bit */ false);
}
void IntrinsicCodeGeneratorX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -257,26 +255,23 @@
MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
-static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
-static void CreateLongToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateLongToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
-static void CreateLongToLongLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateLongToLongLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}
@@ -302,7 +297,7 @@
}
void IntrinsicLocationsBuilderX86::VisitIntegerReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitIntegerReverseBytes(HInvoke* invoke) {
@@ -310,7 +305,7 @@
}
void IntrinsicLocationsBuilderX86::VisitLongReverseBytes(HInvoke* invoke) {
- CreateLongToLongLocations(arena_, invoke);
+ CreateLongToLongLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitLongReverseBytes(HInvoke* invoke) {
@@ -331,7 +326,7 @@
}
void IntrinsicLocationsBuilderX86::VisitShortReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitShortReverseBytes(HInvoke* invoke) {
@@ -342,11 +337,10 @@
// TODO: Consider Quick's way of doing Double abs through integer operations, as the immediate we
// need is 64b.
-static void CreateFloatToFloat(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateFloatToFloat(ArenaAllocator* allocator, HInvoke* invoke) {
// TODO: Enable memory operations when the assembler supports them.
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::SameAsFirstInput());
HInvokeStaticOrDirect* static_or_direct = invoke->AsInvokeStaticOrDirect();
@@ -401,7 +395,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathAbsDouble(HInvoke* invoke) {
- CreateFloatToFloat(arena_, invoke);
+ CreateFloatToFloat(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathAbsDouble(HInvoke* invoke) {
@@ -409,17 +403,16 @@
}
void IntrinsicLocationsBuilderX86::VisitMathAbsFloat(HInvoke* invoke) {
- CreateFloatToFloat(arena_, invoke);
+ CreateFloatToFloat(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathAbsFloat(HInvoke* invoke) {
MathAbsFP(invoke, /* is64bit */ false, GetAssembler(), codegen_);
}
-static void CreateAbsIntLocation(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateAbsIntLocation(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RegisterLocation(EAX));
locations->SetOut(Location::SameAsFirstInput());
locations->AddTemp(Location::RegisterLocation(EDX));
@@ -444,10 +437,9 @@
// The result is in EAX.
}
-static void CreateAbsLongLocation(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateAbsLongLocation(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
locations->AddTemp(Location::RequiresRegister());
@@ -480,7 +472,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathAbsInt(HInvoke* invoke) {
- CreateAbsIntLocation(arena_, invoke);
+ CreateAbsIntLocation(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathAbsInt(HInvoke* invoke) {
@@ -488,7 +480,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathAbsLong(HInvoke* invoke) {
- CreateAbsLongLocation(arena_, invoke);
+ CreateAbsLongLocation(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathAbsLong(HInvoke* invoke) {
@@ -598,10 +590,9 @@
__ Bind(&done);
}
-static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
// The following is sub-optimal, but all we can do for now. It would be fine to also accept
@@ -616,7 +607,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathMinDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathMinDoubleDouble(HInvoke* invoke) {
@@ -628,7 +619,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathMinFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathMinFloatFloat(HInvoke* invoke) {
@@ -640,7 +631,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathMaxDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathMaxDoubleDouble(HInvoke* invoke) {
@@ -652,7 +643,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathMaxFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathMaxFloatFloat(HInvoke* invoke) {
@@ -718,19 +709,17 @@
}
}
-static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
-static void CreateLongLongToLongLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateLongLongToLongLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
@@ -739,7 +728,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathMinIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathMinIntInt(HInvoke* invoke) {
@@ -747,7 +736,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathMinLongLong(HInvoke* invoke) {
- CreateLongLongToLongLocations(arena_, invoke);
+ CreateLongLongToLongLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathMinLongLong(HInvoke* invoke) {
@@ -755,7 +744,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathMaxIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathMaxIntInt(HInvoke* invoke) {
@@ -763,23 +752,22 @@
}
void IntrinsicLocationsBuilderX86::VisitMathMaxLongLong(HInvoke* invoke) {
- CreateLongLongToLongLocations(arena_, invoke);
+ CreateLongLongToLongLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathMaxLongLong(HInvoke* invoke) {
GenMinMax(invoke->GetLocations(), /* is_min */ false, /* is_long */ true, GetAssembler());
}
-static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister());
}
void IntrinsicLocationsBuilderX86::VisitMathSqrt(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathSqrt(HInvoke* invoke) {
@@ -805,18 +793,18 @@
}
}
-static void CreateSSE41FPToFPLocations(ArenaAllocator* arena,
- HInvoke* invoke,
- CodeGeneratorX86* codegen) {
+static void CreateSSE41FPToFPLocations(ArenaAllocator* allocator,
+ HInvoke* invoke,
+ CodeGeneratorX86* codegen) {
// Do we have instruction support?
if (codegen->GetInstructionSetFeatures().HasSSE4_1()) {
- CreateFPToFPLocations(arena, invoke);
+ CreateFPToFPLocations(allocator, invoke);
return;
}
// We have to fall back to a call to the intrinsic.
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(Location::FpuRegisterLocation(XMM0));
@@ -839,7 +827,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathCeil(HInvoke* invoke) {
- CreateSSE41FPToFPLocations(arena_, invoke, codegen_);
+ CreateSSE41FPToFPLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitMathCeil(HInvoke* invoke) {
@@ -847,7 +835,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathFloor(HInvoke* invoke) {
- CreateSSE41FPToFPLocations(arena_, invoke, codegen_);
+ CreateSSE41FPToFPLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitMathFloor(HInvoke* invoke) {
@@ -855,7 +843,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathRint(HInvoke* invoke) {
- CreateSSE41FPToFPLocations(arena_, invoke, codegen_);
+ CreateSSE41FPToFPLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitMathRint(HInvoke* invoke) {
@@ -867,9 +855,8 @@
if (codegen_->GetInstructionSetFeatures().HasSSE4_1()) {
HInvokeStaticOrDirect* static_or_direct = invoke->AsInvokeStaticOrDirect();
DCHECK(static_or_direct != nullptr);
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
if (static_or_direct->HasSpecialInput() &&
invoke->InputAt(
@@ -883,8 +870,8 @@
}
// We have to fall back to a call to the intrinsic.
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(Location::RegisterLocation(EAX));
@@ -951,11 +938,9 @@
__ Bind(&done);
}
-static void CreateFPToFPCallLocations(ArenaAllocator* arena,
- HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(Location::FpuRegisterLocation(XMM0));
@@ -992,7 +977,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathCos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathCos(HInvoke* invoke) {
@@ -1000,7 +985,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathSin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathSin(HInvoke* invoke) {
@@ -1008,7 +993,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathAcos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathAcos(HInvoke* invoke) {
@@ -1016,7 +1001,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathAsin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathAsin(HInvoke* invoke) {
@@ -1024,7 +1009,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathAtan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathAtan(HInvoke* invoke) {
@@ -1032,7 +1017,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathCbrt(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathCbrt(HInvoke* invoke) {
@@ -1040,7 +1025,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathCosh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathCosh(HInvoke* invoke) {
@@ -1048,7 +1033,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathExp(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathExp(HInvoke* invoke) {
@@ -1056,7 +1041,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathExpm1(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathExpm1(HInvoke* invoke) {
@@ -1064,7 +1049,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathLog(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathLog(HInvoke* invoke) {
@@ -1072,7 +1057,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathLog10(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathLog10(HInvoke* invoke) {
@@ -1080,7 +1065,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathSinh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathSinh(HInvoke* invoke) {
@@ -1088,7 +1073,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathTan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathTan(HInvoke* invoke) {
@@ -1096,18 +1081,16 @@
}
void IntrinsicLocationsBuilderX86::VisitMathTanh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathTanh(HInvoke* invoke) {
GenFPToFPCall(invoke, codegen_, kQuickTanh);
}
-static void CreateFPFPToFPCallLocations(ArenaAllocator* arena,
- HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
@@ -1115,7 +1098,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathAtan2(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathAtan2(HInvoke* invoke) {
@@ -1123,7 +1106,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathHypot(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathHypot(HInvoke* invoke) {
@@ -1131,7 +1114,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathNextAfter(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathNextAfter(HInvoke* invoke) {
@@ -1174,7 +1157,7 @@
// Okay, it is safe to generate inline code.
LocationSummary* locations =
- new (arena_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
// arraycopy(Object src, int srcPos, Object dest, int destPos, int length).
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
@@ -1336,9 +1319,8 @@
void IntrinsicLocationsBuilderX86::VisitStringCompareTo(HInvoke* invoke) {
// The inputs plus one temp.
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1363,9 +1345,8 @@
}
void IntrinsicLocationsBuilderX86::VisitStringEquals(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -1655,7 +1636,7 @@
}
void IntrinsicLocationsBuilderX86::VisitStringIndexOf(HInvoke* invoke) {
- CreateStringIndexOfLocations(invoke, arena_, /* start_at_zero */ true);
+ CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ true);
}
void IntrinsicCodeGeneratorX86::VisitStringIndexOf(HInvoke* invoke) {
@@ -1663,7 +1644,7 @@
}
void IntrinsicLocationsBuilderX86::VisitStringIndexOfAfter(HInvoke* invoke) {
- CreateStringIndexOfLocations(invoke, arena_, /* start_at_zero */ false);
+ CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ false);
}
void IntrinsicCodeGeneratorX86::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1672,9 +1653,8 @@
}
void IntrinsicLocationsBuilderX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1699,9 +1679,8 @@
}
void IntrinsicLocationsBuilderX86::VisitStringNewStringFromChars(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1721,9 +1700,8 @@
}
void IntrinsicLocationsBuilderX86::VisitStringNewStringFromString(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetOut(Location::RegisterLocation(EAX));
@@ -1746,9 +1724,8 @@
void IntrinsicLocationsBuilderX86::VisitStringGetCharsNoCheck(HInvoke* invoke) {
// public void getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin);
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
// Place srcEnd in ECX to save a move below.
@@ -1875,7 +1852,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMemoryPeekByte(HInvoke* invoke) {
- CreateLongToIntLocations(arena_, invoke);
+ CreateLongToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMemoryPeekByte(HInvoke* invoke) {
@@ -1883,7 +1860,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMemoryPeekIntNative(HInvoke* invoke) {
- CreateLongToIntLocations(arena_, invoke);
+ CreateLongToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMemoryPeekIntNative(HInvoke* invoke) {
@@ -1891,7 +1868,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMemoryPeekLongNative(HInvoke* invoke) {
- CreateLongToLongLocations(arena_, invoke);
+ CreateLongToLongLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMemoryPeekLongNative(HInvoke* invoke) {
@@ -1899,18 +1876,18 @@
}
void IntrinsicLocationsBuilderX86::VisitMemoryPeekShortNative(HInvoke* invoke) {
- CreateLongToIntLocations(arena_, invoke);
+ CreateLongToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMemoryPeekShortNative(HInvoke* invoke) {
GenPeek(invoke->GetLocations(), DataType::Type::kInt16, GetAssembler());
}
-static void CreateLongIntToVoidLocations(ArenaAllocator* arena, DataType::Type size,
+static void CreateLongIntToVoidLocations(ArenaAllocator* allocator,
+ DataType::Type size,
HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
HInstruction* value = invoke->InputAt(1);
if (size == DataType::Type::kInt8) {
@@ -1967,7 +1944,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMemoryPokeByte(HInvoke* invoke) {
- CreateLongIntToVoidLocations(arena_, DataType::Type::kInt8, invoke);
+ CreateLongIntToVoidLocations(allocator_, DataType::Type::kInt8, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMemoryPokeByte(HInvoke* invoke) {
@@ -1975,7 +1952,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMemoryPokeIntNative(HInvoke* invoke) {
- CreateLongIntToVoidLocations(arena_, DataType::Type::kInt32, invoke);
+ CreateLongIntToVoidLocations(allocator_, DataType::Type::kInt32, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMemoryPokeIntNative(HInvoke* invoke) {
@@ -1983,7 +1960,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMemoryPokeLongNative(HInvoke* invoke) {
- CreateLongIntToVoidLocations(arena_, DataType::Type::kInt64, invoke);
+ CreateLongIntToVoidLocations(allocator_, DataType::Type::kInt64, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMemoryPokeLongNative(HInvoke* invoke) {
@@ -1991,7 +1968,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMemoryPokeShortNative(HInvoke* invoke) {
- CreateLongIntToVoidLocations(arena_, DataType::Type::kInt16, invoke);
+ CreateLongIntToVoidLocations(allocator_, DataType::Type::kInt16, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMemoryPokeShortNative(HInvoke* invoke) {
@@ -1999,9 +1976,8 @@
}
void IntrinsicLocationsBuilderX86::VisitThreadCurrentThread(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
@@ -2071,18 +2047,19 @@
}
}
-static void CreateIntIntIntToIntLocations(ArenaAllocator* arena,
+static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator,
HInvoke* invoke,
DataType::Type type,
bool is_volatile) {
bool can_call = kEmitCompilerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -2104,23 +2081,26 @@
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGet(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32, /* is_volatile */ false);
+ CreateIntIntIntToIntLocations(
+ allocator_, invoke, DataType::Type::kInt32, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32, /* is_volatile */ true);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32, /* is_volatile */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetLong(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64, /* is_volatile */ false);
+ CreateIntIntIntToIntLocations(
+ allocator_, invoke, DataType::Type::kInt64, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64, /* is_volatile */ true);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64, /* is_volatile */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetObject(HInvoke* invoke) {
CreateIntIntIntToIntLocations(
- arena_, invoke, DataType::Type::kReference, /* is_volatile */ false);
+ allocator_, invoke, DataType::Type::kReference, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference, /* is_volatile */ true);
+ CreateIntIntIntToIntLocations(
+ allocator_, invoke, DataType::Type::kReference, /* is_volatile */ true);
}
@@ -2144,13 +2124,12 @@
}
-static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* arena,
+static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* allocator,
DataType::Type type,
HInvoke* invoke,
bool is_volatile) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -2168,39 +2147,39 @@
void IntrinsicLocationsBuilderX86::VisitUnsafePut(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kInt32, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kInt32, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kInt32, invoke, /* is_volatile */ true);
+ allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutObject(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kReference, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kReference, invoke, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kReference, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kReference, invoke, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kReference, invoke, /* is_volatile */ true);
+ allocator_, DataType::Type::kReference, invoke, /* is_volatile */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutLong(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kInt64, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutLongOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kInt64, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutLongVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kInt64, invoke, /* is_volatile */ true);
+ allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ true);
}
// We don't care for ordered: it requires an AnyStore barrier, which is already given by the x86
@@ -2282,17 +2261,18 @@
GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ true, codegen_);
}
-static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena,
+static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator,
DataType::Type type,
HInvoke* invoke) {
bool can_call = kEmitCompilerReadBarrier &&
kUseBakerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
// Offset is a long, but in 32 bit mode, we only need the low word.
@@ -2320,11 +2300,11 @@
}
void IntrinsicLocationsBuilderX86::VisitUnsafeCASInt(HInvoke* invoke) {
- CreateIntIntIntIntIntToInt(arena_, DataType::Type::kInt32, invoke);
+ CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kInt32, invoke);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeCASLong(HInvoke* invoke) {
- CreateIntIntIntIntIntToInt(arena_, DataType::Type::kInt64, invoke);
+ CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kInt64, invoke);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeCASObject(HInvoke* invoke) {
@@ -2334,7 +2314,7 @@
return;
}
- CreateIntIntIntIntIntToInt(arena_, DataType::Type::kReference, invoke);
+ CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kReference, invoke);
}
static void GenCAS(DataType::Type type, HInvoke* invoke, CodeGeneratorX86* codegen) {
@@ -2473,9 +2453,8 @@
}
void IntrinsicLocationsBuilderX86::VisitIntegerReverse(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
locations->AddTemp(Location::RequiresRegister());
@@ -2516,9 +2495,8 @@
}
void IntrinsicLocationsBuilderX86::VisitLongReverse(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
locations->AddTemp(Location::RequiresRegister());
@@ -2553,15 +2531,14 @@
}
static void CreateBitCountLocations(
- ArenaAllocator* arena, CodeGeneratorX86* codegen, HInvoke* invoke, bool is_long) {
+ ArenaAllocator* allocator, CodeGeneratorX86* codegen, HInvoke* invoke, bool is_long) {
if (!codegen->GetInstructionSetFeatures().HasPopCnt()) {
// Do nothing if there is no popcnt support. This results in generating
// a call for the intrinsic rather than direct code.
return;
}
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
if (is_long) {
locations->AddTemp(Location::RequiresRegister());
}
@@ -2610,7 +2587,7 @@
}
void IntrinsicLocationsBuilderX86::VisitIntegerBitCount(HInvoke* invoke) {
- CreateBitCountLocations(arena_, codegen_, invoke, /* is_long */ false);
+ CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long */ false);
}
void IntrinsicCodeGeneratorX86::VisitIntegerBitCount(HInvoke* invoke) {
@@ -2618,17 +2595,16 @@
}
void IntrinsicLocationsBuilderX86::VisitLongBitCount(HInvoke* invoke) {
- CreateBitCountLocations(arena_, codegen_, invoke, /* is_long */ true);
+ CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long */ true);
}
void IntrinsicCodeGeneratorX86::VisitLongBitCount(HInvoke* invoke) {
GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ true);
}
-static void CreateLeadingZeroLocations(ArenaAllocator* arena, HInvoke* invoke, bool is_long) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateLeadingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_long) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
if (is_long) {
locations->SetInAt(0, Location::RequiresRegister());
} else {
@@ -2715,7 +2691,7 @@
}
void IntrinsicLocationsBuilderX86::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- CreateLeadingZeroLocations(arena_, invoke, /* is_long */ false);
+ CreateLeadingZeroLocations(allocator_, invoke, /* is_long */ false);
}
void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
@@ -2723,17 +2699,16 @@
}
void IntrinsicLocationsBuilderX86::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- CreateLeadingZeroLocations(arena_, invoke, /* is_long */ true);
+ CreateLeadingZeroLocations(allocator_, invoke, /* is_long */ true);
}
void IntrinsicCodeGeneratorX86::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
}
-static void CreateTrailingZeroLocations(ArenaAllocator* arena, HInvoke* invoke, bool is_long) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateTrailingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_long) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
if (is_long) {
locations->SetInAt(0, Location::RequiresRegister());
} else {
@@ -2807,7 +2782,7 @@
}
void IntrinsicLocationsBuilderX86::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- CreateTrailingZeroLocations(arena_, invoke, /* is_long */ false);
+ CreateTrailingZeroLocations(allocator_, invoke, /* is_long */ false);
}
void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
@@ -2815,7 +2790,7 @@
}
void IntrinsicLocationsBuilderX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- CreateTrailingZeroLocations(arena_, invoke, /* is_long */ true);
+ CreateTrailingZeroLocations(allocator_, invoke, /* is_long */ true);
}
void IntrinsicCodeGeneratorX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -3352,9 +3327,8 @@
}
void IntrinsicLocationsBuilderX86::VisitThreadInterrupted(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
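
The x86 hunks above are a mechanical rename of the builder's arena_ field to allocator_, but the allocation idiom they touch is worth spelling out: every LocationSummary is placement-new'ed into an arena owned by the graph and is never individually freed. Below is a minimal, self-contained sketch of that idiom; BumpArena and Summary are invented stand-ins, not ART's real ArenaAllocator or LocationSummary.

#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>

// Hypothetical bump-pointer arena standing in for ART's ArenaAllocator;
// names and layout here are invented for illustration only.
class BumpArena {
 public:
  void* Alloc(size_t bytes) {
    bytes = (bytes + 15) & ~size_t{15};  // keep 16-byte alignment
    if (blocks_.empty() || used_ + bytes > kBlockSize) {
      blocks_.push_back(std::make_unique<uint8_t[]>(kBlockSize));
      used_ = 0;
    }
    void* result = blocks_.back().get() + used_;
    used_ += bytes;
    return result;
  }
  // No Free(): everything is released at once when the arena dies.
 private:
  static constexpr size_t kBlockSize = 64 * 1024;  // assumes objects fit in one block
  std::vector<std::unique_ptr<uint8_t[]>> blocks_;
  size_t used_ = 0;
};

// Placement-new hook so `new (arena) T(...)` draws from the arena, matching
// the `new (allocator_) LocationSummary(...)` calls in the diff.
void* operator new(size_t bytes, BumpArena* arena) { return arena->Alloc(bytes); }

struct Summary {  // stand-in for LocationSummary
  explicit Summary(int call_kind) : call_kind(call_kind) {}
  int call_kind;
};

int main() {
  BumpArena arena;
  Summary* s = new (&arena) Summary(0);  // never delete'd: the arena owns it
  return s->call_kind;
}

The payoff in a compiler pass is that thousands of small objects share one allocation lifetime, so teardown is a single arena release rather than per-object destruction.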
diff --git a/compiler/optimizing/intrinsics_x86.h b/compiler/optimizing/intrinsics_x86.h
index 22f11b1..ba3ca0a 100644
--- a/compiler/optimizing/intrinsics_x86.h
+++ b/compiler/optimizing/intrinsics_x86.h
@@ -49,7 +49,7 @@
bool TryDispatch(HInvoke* invoke);
private:
- ArenaAllocator* arena_;
+ ArenaAllocator* allocator_;
CodeGeneratorX86* codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86);
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index a2545ee..6337900 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -41,7 +41,7 @@
namespace x86_64 {
IntrinsicLocationsBuilderX86_64::IntrinsicLocationsBuilderX86_64(CodeGeneratorX86_64* codegen)
- : arena_(codegen->GetGraph()->GetArena()), codegen_(codegen) {
+ : allocator_(codegen->GetGraph()->GetAllocator()), codegen_(codegen) {
}
X86_64Assembler* IntrinsicCodeGeneratorX86_64::GetAssembler() {
@@ -49,7 +49,7 @@
}
ArenaAllocator* IntrinsicCodeGeneratorX86_64::GetAllocator() {
- return codegen_->GetGraph()->GetArena();
+ return codegen_->GetGraph()->GetAllocator();
}
bool IntrinsicLocationsBuilderX86_64::TryDispatch(HInvoke* invoke) {
@@ -128,18 +128,16 @@
#define __ assembler->
-static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
}
-static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
}
@@ -157,10 +155,10 @@
}
void IntrinsicLocationsBuilderX86_64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
@@ -171,10 +169,10 @@
}
void IntrinsicLocationsBuilderX86_64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -184,10 +182,9 @@
MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
-static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
@@ -216,7 +213,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerReverseBytes(HInvoke* invoke) {
@@ -224,7 +221,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitLongReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitLongReverseBytes(HInvoke* invoke) {
@@ -232,7 +229,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitShortReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitShortReverseBytes(HInvoke* invoke) {
@@ -243,11 +240,10 @@
// TODO: Consider Quick's way of doing Double abs through integer operations, as the immediate we
// need is 64b.
-static void CreateFloatToFloatPlusTemps(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateFloatToFloatPlusTemps(ArenaAllocator* allocator, HInvoke* invoke) {
// TODO: Enable memory operations when the assembler supports them.
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::SameAsFirstInput());
locations->AddTemp(Location::RequiresFpuRegister()); // FP reg to hold mask.
@@ -275,7 +271,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathAbsDouble(HInvoke* invoke) {
- CreateFloatToFloatPlusTemps(arena_, invoke);
+ CreateFloatToFloatPlusTemps(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathAbsDouble(HInvoke* invoke) {
@@ -283,17 +279,16 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathAbsFloat(HInvoke* invoke) {
- CreateFloatToFloatPlusTemps(arena_, invoke);
+ CreateFloatToFloatPlusTemps(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathAbsFloat(HInvoke* invoke) {
MathAbsFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler(), codegen_);
}
-static void CreateIntToIntPlusTemp(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToIntPlusTemp(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
locations->AddTemp(Location::RequiresRegister());
@@ -322,7 +317,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathAbsInt(HInvoke* invoke) {
- CreateIntToIntPlusTemp(arena_, invoke);
+ CreateIntToIntPlusTemp(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathAbsInt(HInvoke* invoke) {
@@ -330,7 +325,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathAbsLong(HInvoke* invoke) {
- CreateIntToIntPlusTemp(arena_, invoke);
+ CreateIntToIntPlusTemp(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathAbsLong(HInvoke* invoke) {
@@ -421,10 +416,9 @@
__ Bind(&done);
}
-static void CreateFPFPToFP(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPFPToFP(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
// The following is sub-optimal, but all we can do for now. It would be fine to also accept
@@ -433,7 +427,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathMinDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFP(arena_, invoke);
+ CreateFPFPToFP(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathMinDoubleDouble(HInvoke* invoke) {
@@ -442,7 +436,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathMinFloatFloat(HInvoke* invoke) {
- CreateFPFPToFP(arena_, invoke);
+ CreateFPFPToFP(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathMinFloatFloat(HInvoke* invoke) {
@@ -451,7 +445,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFP(arena_, invoke);
+ CreateFPFPToFP(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
@@ -460,7 +454,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathMaxFloatFloat(HInvoke* invoke) {
- CreateFPFPToFP(arena_, invoke);
+ CreateFPFPToFP(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathMaxFloatFloat(HInvoke* invoke) {
@@ -500,17 +494,16 @@
__ cmov(is_min ? Condition::kGreater : Condition::kLess, out, op2, is_long);
}
-static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
void IntrinsicLocationsBuilderX86_64::VisitMathMinIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathMinIntInt(HInvoke* invoke) {
@@ -518,7 +511,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathMinLongLong(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathMinLongLong(HInvoke* invoke) {
@@ -526,7 +519,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathMaxIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathMaxIntInt(HInvoke* invoke) {
@@ -534,23 +527,22 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathMaxLongLong(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathMaxLongLong(HInvoke* invoke) {
GenMinMax(invoke->GetLocations(), /* is_min */ false, /* is_long */ true, GetAssembler());
}
-static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister());
}
void IntrinsicLocationsBuilderX86_64::VisitMathSqrt(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathSqrt(HInvoke* invoke) {
@@ -576,18 +568,18 @@
}
}
-static void CreateSSE41FPToFPLocations(ArenaAllocator* arena,
- HInvoke* invoke,
- CodeGeneratorX86_64* codegen) {
+static void CreateSSE41FPToFPLocations(ArenaAllocator* allocator,
+ HInvoke* invoke,
+ CodeGeneratorX86_64* codegen) {
// Do we have instruction support?
if (codegen->GetInstructionSetFeatures().HasSSE4_1()) {
- CreateFPToFPLocations(arena, invoke);
+ CreateFPToFPLocations(allocator, invoke);
return;
}
// We have to fall back to a call to the intrinsic.
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(Location::FpuRegisterLocation(XMM0));
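
This hunk keeps the two-way split in CreateSSE41FPToFPLocations: with SSE4.1 available the intrinsic stays in registers and is emitted inline, otherwise the invoke is set up as a call on main and lowered to the runtime's implementation. A reduced sketch of that dispatch follows, assuming a GCC/Clang x86 host where __builtin_cpu_supports exists; CallKind and PickRoundCallKind are illustrative names only.

#include <cstdio>

enum class CallKind { kNoCall, kCallOnMainOnly };

// Placeholder for codegen->GetInstructionSetFeatures().HasSSE4_1(); here we
// simply ask the host CPU via a GCC/Clang builtin.
static bool HasSse41() { return __builtin_cpu_supports("sse4.1"); }

// Reduced shape of CreateSSE41FPToFPLocations: pick the call kind first, and
// everything else about the LocationSummary follows from that choice.
static CallKind PickRoundCallKind() {
  return HasSse41() ? CallKind::kNoCall           // emit roundss/roundsd inline
                    : CallKind::kCallOnMainOnly;  // fall back to the runtime
}

int main() {
  std::printf("round lowered %s\n",
              PickRoundCallKind() == CallKind::kNoCall ? "inline" : "as a call");
}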
@@ -610,7 +602,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathCeil(HInvoke* invoke) {
- CreateSSE41FPToFPLocations(arena_, invoke, codegen_);
+ CreateSSE41FPToFPLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitMathCeil(HInvoke* invoke) {
@@ -618,7 +610,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathFloor(HInvoke* invoke) {
- CreateSSE41FPToFPLocations(arena_, invoke, codegen_);
+ CreateSSE41FPToFPLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitMathFloor(HInvoke* invoke) {
@@ -626,21 +618,20 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathRint(HInvoke* invoke) {
- CreateSSE41FPToFPLocations(arena_, invoke, codegen_);
+ CreateSSE41FPToFPLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitMathRint(HInvoke* invoke) {
GenSSE41FPToFPIntrinsic(codegen_, invoke, GetAssembler(), 0);
}
-static void CreateSSE41FPToIntLocations(ArenaAllocator* arena,
- HInvoke* invoke,
- CodeGeneratorX86_64* codegen) {
+static void CreateSSE41FPToIntLocations(ArenaAllocator* allocator,
+ HInvoke* invoke,
+ CodeGeneratorX86_64* codegen) {
// Do we have instruction support?
if (codegen->GetInstructionSetFeatures().HasSSE4_1()) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
@@ -649,8 +640,8 @@
}
// We have to fall back to a call to the intrinsic.
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(Location::RegisterLocation(RAX));
@@ -659,7 +650,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathRoundFloat(HInvoke* invoke) {
- CreateSSE41FPToIntLocations(arena_, invoke, codegen_);
+ CreateSSE41FPToIntLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitMathRoundFloat(HInvoke* invoke) {
@@ -703,7 +694,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathRoundDouble(HInvoke* invoke) {
- CreateSSE41FPToIntLocations(arena_, invoke, codegen_);
+ CreateSSE41FPToIntLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitMathRoundDouble(HInvoke* invoke) {
@@ -746,11 +737,9 @@
__ Bind(&done);
}
-static void CreateFPToFPCallLocations(ArenaAllocator* arena,
- HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(Location::FpuRegisterLocation(XMM0));
@@ -773,7 +762,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathCos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathCos(HInvoke* invoke) {
@@ -781,7 +770,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathSin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathSin(HInvoke* invoke) {
@@ -789,7 +778,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathAcos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathAcos(HInvoke* invoke) {
@@ -797,7 +786,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathAsin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathAsin(HInvoke* invoke) {
@@ -805,7 +794,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathAtan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathAtan(HInvoke* invoke) {
@@ -813,7 +802,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathCbrt(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathCbrt(HInvoke* invoke) {
@@ -821,7 +810,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathCosh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathCosh(HInvoke* invoke) {
@@ -829,7 +818,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathExp(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathExp(HInvoke* invoke) {
@@ -837,7 +826,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathExpm1(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathExpm1(HInvoke* invoke) {
@@ -845,7 +834,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathLog(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathLog(HInvoke* invoke) {
@@ -853,7 +842,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathLog10(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathLog10(HInvoke* invoke) {
@@ -861,7 +850,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathSinh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathSinh(HInvoke* invoke) {
@@ -869,7 +858,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathTan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathTan(HInvoke* invoke) {
@@ -877,18 +866,16 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathTanh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathTanh(HInvoke* invoke) {
GenFPToFPCall(invoke, codegen_, kQuickTanh);
}
-static void CreateFPFPToFPCallLocations(ArenaAllocator* arena,
- HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
@@ -903,7 +890,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathAtan2(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathAtan2(HInvoke* invoke) {
@@ -911,7 +898,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathHypot(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathHypot(HInvoke* invoke) {
@@ -919,7 +906,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathNextAfter(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathNextAfter(HInvoke* invoke) {
@@ -949,9 +936,8 @@
}
}
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnSlowPath,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
// arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length).
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
@@ -1507,9 +1493,8 @@
}
void IntrinsicLocationsBuilderX86_64::VisitStringCompareTo(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1534,9 +1519,8 @@
}
void IntrinsicLocationsBuilderX86_64::VisitStringEquals(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -1812,7 +1796,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitStringIndexOf(HInvoke* invoke) {
- CreateStringIndexOfLocations(invoke, arena_, /* start_at_zero */ true);
+ CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ true);
}
void IntrinsicCodeGeneratorX86_64::VisitStringIndexOf(HInvoke* invoke) {
@@ -1820,7 +1804,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
- CreateStringIndexOfLocations(invoke, arena_, /* start_at_zero */ false);
+ CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ false);
}
void IntrinsicCodeGeneratorX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1829,9 +1813,8 @@
}
void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromBytes(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1856,9 +1839,8 @@
}
void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromChars(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1878,9 +1860,8 @@
}
void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromString(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetOut(Location::RegisterLocation(RAX));
@@ -1903,9 +1884,8 @@
void IntrinsicLocationsBuilderX86_64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
// public void getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin);
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
locations->SetInAt(2, Location::RequiresRegister());
@@ -2018,7 +1998,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekByte(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekByte(HInvoke* invoke) {
@@ -2026,7 +2006,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekIntNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekIntNative(HInvoke* invoke) {
@@ -2034,7 +2014,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekLongNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekLongNative(HInvoke* invoke) {
@@ -2042,17 +2022,16 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekShortNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekShortNative(HInvoke* invoke) {
GenPeek(invoke->GetLocations(), DataType::Type::kInt16, GetAssembler());
}
-static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrInt32Constant(invoke->InputAt(1)));
}
@@ -2104,7 +2083,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeByte(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeByte(HInvoke* invoke) {
@@ -2112,7 +2091,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeIntNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeIntNative(HInvoke* invoke) {
@@ -2120,7 +2099,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeLongNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeLongNative(HInvoke* invoke) {
@@ -2128,7 +2107,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeShortNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeShortNative(HInvoke* invoke) {
@@ -2136,9 +2115,8 @@
}
void IntrinsicLocationsBuilderX86_64::VisitThreadCurrentThread(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
@@ -2194,15 +2172,16 @@
}
}
-static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
bool can_call = kEmitCompilerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -2214,22 +2193,22 @@
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGet(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetLong(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetObject(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
@@ -2253,12 +2232,11 @@
}
-static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* arena,
+static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* allocator,
DataType::Type type,
HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -2271,31 +2249,31 @@
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePut(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt32, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kInt32, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt32, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kInt32, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt32, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kInt32, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutObject(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kReference, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kReference, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kReference, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kReference, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kReference, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kReference, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutLong(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt64, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kInt64, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt64, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kInt64, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt64, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kInt64, invoke);
}
// We don't care for ordered: it requires an AnyStore barrier, which is already given by the x86
@@ -2363,17 +2341,18 @@
GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ true, codegen_);
}
-static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena,
+static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator,
DataType::Type type,
HInvoke* invoke) {
bool can_call = kEmitCompilerReadBarrier &&
kUseBakerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -2391,11 +2370,11 @@
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASInt(HInvoke* invoke) {
- CreateIntIntIntIntIntToInt(arena_, DataType::Type::kInt32, invoke);
+ CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kInt32, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASLong(HInvoke* invoke) {
- CreateIntIntIntIntIntToInt(arena_, DataType::Type::kInt64, invoke);
+ CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kInt64, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASObject(HInvoke* invoke) {
@@ -2405,7 +2384,7 @@
return;
}
- CreateIntIntIntIntIntToInt(arena_, DataType::Type::kReference, invoke);
+ CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kReference, invoke);
}
static void GenCAS(DataType::Type type, HInvoke* invoke, CodeGeneratorX86_64* codegen) {
@@ -2537,9 +2516,8 @@
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerReverse(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
locations->AddTemp(Location::RequiresRegister());
@@ -2580,9 +2558,8 @@
}
void IntrinsicLocationsBuilderX86_64::VisitLongReverse(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
locations->AddTemp(Location::RequiresRegister());
@@ -2625,15 +2602,14 @@
}
static void CreateBitCountLocations(
- ArenaAllocator* arena, CodeGeneratorX86_64* codegen, HInvoke* invoke) {
+ ArenaAllocator* allocator, CodeGeneratorX86_64* codegen, HInvoke* invoke) {
if (!codegen->GetInstructionSetFeatures().HasPopCnt()) {
// Do nothing if there is no popcnt support. This results in generating
// a call for the intrinsic rather than direct code.
return;
}
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresRegister());
}
@@ -2672,7 +2648,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerBitCount(HInvoke* invoke) {
- CreateBitCountLocations(arena_, codegen_, invoke);
+ CreateBitCountLocations(allocator_, codegen_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerBitCount(HInvoke* invoke) {
@@ -2680,17 +2656,16 @@
}
void IntrinsicLocationsBuilderX86_64::VisitLongBitCount(HInvoke* invoke) {
- CreateBitCountLocations(arena_, codegen_, invoke);
+ CreateBitCountLocations(allocator_, codegen_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitLongBitCount(HInvoke* invoke) {
GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ true);
}
-static void CreateOneBitLocations(ArenaAllocator* arena, HInvoke* invoke, bool is_high) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateOneBitLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_high) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(is_high ? Location::RegisterLocation(RCX) // needs CL
@@ -2787,7 +2762,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerHighestOneBit(HInvoke* invoke) {
- CreateOneBitLocations(arena_, invoke, /* is_high */ true);
+ CreateOneBitLocations(allocator_, invoke, /* is_high */ true);
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerHighestOneBit(HInvoke* invoke) {
@@ -2795,7 +2770,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitLongHighestOneBit(HInvoke* invoke) {
- CreateOneBitLocations(arena_, invoke, /* is_high */ true);
+ CreateOneBitLocations(allocator_, invoke, /* is_high */ true);
}
void IntrinsicCodeGeneratorX86_64::VisitLongHighestOneBit(HInvoke* invoke) {
@@ -2803,7 +2778,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerLowestOneBit(HInvoke* invoke) {
- CreateOneBitLocations(arena_, invoke, /* is_high */ false);
+ CreateOneBitLocations(allocator_, invoke, /* is_high */ false);
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerLowestOneBit(HInvoke* invoke) {
@@ -2811,17 +2786,16 @@
}
void IntrinsicLocationsBuilderX86_64::VisitLongLowestOneBit(HInvoke* invoke) {
- CreateOneBitLocations(arena_, invoke, /* is_high */ false);
+ CreateOneBitLocations(allocator_, invoke, /* is_high */ false);
}
void IntrinsicCodeGeneratorX86_64::VisitLongLowestOneBit(HInvoke* invoke) {
GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ false, /* is_long */ true);
}
-static void CreateLeadingZeroLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateLeadingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresRegister());
}
@@ -2877,7 +2851,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- CreateLeadingZeroLocations(arena_, invoke);
+ CreateLeadingZeroLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
@@ -2885,17 +2859,16 @@
}
void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- CreateLeadingZeroLocations(arena_, invoke);
+ CreateLeadingZeroLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
}
-static void CreateTrailingZeroLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateTrailingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresRegister());
}
@@ -2946,7 +2919,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- CreateTrailingZeroLocations(arena_, invoke);
+ CreateTrailingZeroLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
@@ -2954,7 +2927,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- CreateTrailingZeroLocations(arena_, invoke);
+ CreateTrailingZeroLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -3029,9 +3002,8 @@
}
void IntrinsicLocationsBuilderX86_64::VisitThreadInterrupted(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
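
Across the whole x86-64 file the change is the same rename seen for x86: the builder caches the graph's allocator once in its constructor (see the IntrinsicLocationsBuilderX86_64 hunk above) and every Visit* method funnels into a small Create*Locations helper. A compilable toy of that constructor-caching shape, with every type an invented stand-in:

#include <cassert>

struct ArenaStub {};  // stands in for ArenaAllocator

struct GraphStub {
  ArenaStub arena;
  ArenaStub* GetAllocator() { return &arena; }
};

struct CodegenStub {
  GraphStub* graph;
  GraphStub* GetGraph() { return graph; }
};

// Mirrors the builder's constructor: grab the graph's allocator once so every
// Visit* helper can allocate without reaching back through the codegen.
class BuilderStub {
 public:
  explicit BuilderStub(CodegenStub* codegen)
      : allocator_(codegen->GetGraph()->GetAllocator()), codegen_(codegen) {}
  ArenaStub* allocator() const { return allocator_; }
  CodegenStub* codegen() const { return codegen_; }

 private:
  ArenaStub* allocator_;
  CodegenStub* codegen_;
};

int main() {
  GraphStub graph;
  CodegenStub codegen{&graph};
  BuilderStub builder(&codegen);
  assert(builder.allocator() == graph.GetAllocator());
  (void)builder;
}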
diff --git a/compiler/optimizing/intrinsics_x86_64.h b/compiler/optimizing/intrinsics_x86_64.h
index 4b28788..b0fbe91 100644
--- a/compiler/optimizing/intrinsics_x86_64.h
+++ b/compiler/optimizing/intrinsics_x86_64.h
@@ -49,7 +49,7 @@
bool TryDispatch(HInvoke* invoke);
private:
- ArenaAllocator* arena_;
+ ArenaAllocator* allocator_;
CodeGeneratorX86_64* codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86_64);
diff --git a/compiler/optimizing/licm.cc b/compiler/optimizing/licm.cc
index 10524b0..7af1a20 100644
--- a/compiler/optimizing/licm.cc
+++ b/compiler/optimizing/licm.cc
@@ -84,10 +84,10 @@
// Only used during debug.
ArenaBitVector* visited = nullptr;
if (kIsDebugBuild) {
- visited = new (graph_->GetArena()) ArenaBitVector(graph_->GetArena(),
- graph_->GetBlocks().size(),
- false,
- kArenaAllocLICM);
+ visited = new (graph_->GetAllocator()) ArenaBitVector(graph_->GetAllocator(),
+ graph_->GetBlocks().size(),
+ false,
+ kArenaAllocLICM);
}
// Post order visit to visit inner loops before outer loops.
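
The licm.cc hunk only reindents the debug-only ArenaBitVector allocation for the new GetAllocator() spelling, but the guard itself is a recurring ART pattern: verification state is built only when kIsDebugBuild is set, so release builds pay nothing for it. A standard-library sketch of the same guard; ART's kIsDebugBuild is a real constant, while the NDEBUG-based definition below is an assumption made for the sketch.

#include <cstddef>
#include <vector>

#ifdef NDEBUG
static constexpr bool kIsDebugBuild = false;
#else
static constexpr bool kIsDebugBuild = true;
#endif

// Mirrors the guard above with standard containers; in ART the container is
// an ArenaBitVector placement-new'ed into the graph's arena.
static void VisitBlocks(size_t num_blocks) {
  std::vector<bool>* visited = nullptr;
  std::vector<bool> storage;  // only populated for debug runs
  if (kIsDebugBuild) {
    storage.assign(num_blocks, false);
    visited = &storage;
  }
  for (size_t i = 0; i < num_blocks; ++i) {
    if (kIsDebugBuild) {
      (*visited)[i] = true;  // bookkeeping consumed only by debug checks
    }
    // ... the actual per-block work would go here ...
  }
}

int main() { VisitBlocks(8); }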
diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc
index 0617e60..adc3cab 100644
--- a/compiler/optimizing/licm_test.cc
+++ b/compiler/optimizing/licm_test.cc
@@ -27,12 +27,10 @@
/**
* Fixture class for the LICM tests.
*/
-class LICMTest : public CommonCompilerTest {
+class LICMTest : public OptimizingUnitTest {
public:
LICMTest()
- : pool_(),
- allocator_(&pool_),
- entry_(nullptr),
+ : entry_(nullptr),
loop_preheader_(nullptr),
loop_header_(nullptr),
loop_body_(nullptr),
@@ -41,7 +39,7 @@
parameter_(nullptr),
int_constant_(nullptr),
float_constant_(nullptr) {
- graph_ = CreateGraph(&allocator_);
+ graph_ = CreateGraph();
}
~LICMTest() { }
@@ -49,12 +47,12 @@
// Builds a singly-nested loop structure in CFG. Tests can further populate
// the basic blocks with instructions to set up interesting scenarios.
void BuildLoop() {
- entry_ = new (&allocator_) HBasicBlock(graph_);
- loop_preheader_ = new (&allocator_) HBasicBlock(graph_);
- loop_header_ = new (&allocator_) HBasicBlock(graph_);
- loop_body_ = new (&allocator_) HBasicBlock(graph_);
- return_ = new (&allocator_) HBasicBlock(graph_);
- exit_ = new (&allocator_) HBasicBlock(graph_);
+ entry_ = new (GetAllocator()) HBasicBlock(graph_);
+ loop_preheader_ = new (GetAllocator()) HBasicBlock(graph_);
+ loop_header_ = new (GetAllocator()) HBasicBlock(graph_);
+ loop_body_ = new (GetAllocator()) HBasicBlock(graph_);
+ return_ = new (GetAllocator()) HBasicBlock(graph_);
+ exit_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry_);
graph_->AddBlock(loop_preheader_);
@@ -75,18 +73,18 @@
return_->AddSuccessor(exit_);
// Provide boiler-plate instructions.
- parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kReference);
+ parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kReference);
entry_->AddInstruction(parameter_);
int_constant_ = graph_->GetIntConstant(42);
float_constant_ = graph_->GetFloatConstant(42.0f);
- loop_preheader_->AddInstruction(new (&allocator_) HGoto());
- loop_header_->AddInstruction(new (&allocator_) HIf(parameter_));
- loop_body_->AddInstruction(new (&allocator_) HGoto());
- return_->AddInstruction(new (&allocator_) HReturnVoid());
- exit_->AddInstruction(new (&allocator_) HExit());
+ loop_preheader_->AddInstruction(new (GetAllocator()) HGoto());
+ loop_header_->AddInstruction(new (GetAllocator()) HIf(parameter_));
+ loop_body_->AddInstruction(new (GetAllocator()) HGoto());
+ return_->AddInstruction(new (GetAllocator()) HReturnVoid());
+ exit_->AddInstruction(new (GetAllocator()) HExit());
}
// Performs LICM optimizations (after proper set up).
@@ -98,8 +96,6 @@
}
// General building fields.
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
// Specific basic blocks.
@@ -123,17 +119,17 @@
BuildLoop();
// Populate the loop with instructions: set/get field with different types.
- HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_,
- nullptr,
- DataType::Type::kInt64,
- MemberOffset(10),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph_->GetDexFile(),
- 0);
+ HInstruction* get_field = new (GetAllocator()) HInstanceFieldGet(parameter_,
+ nullptr,
+ DataType::Type::kInt64,
+ MemberOffset(10),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ 0);
loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
- HInstruction* set_field = new (&allocator_) HInstanceFieldSet(
+ HInstruction* set_field = new (GetAllocator()) HInstanceFieldSet(
parameter_, int_constant_, nullptr, DataType::Type::kInt32, MemberOffset(20),
false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), 0);
loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
@@ -150,26 +146,26 @@
// Populate the loop with instructions: set/get field with same types.
ScopedNullHandle<mirror::DexCache> dex_cache;
- HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_,
- nullptr,
- DataType::Type::kInt64,
- MemberOffset(10),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph_->GetDexFile(),
- 0);
+ HInstruction* get_field = new (GetAllocator()) HInstanceFieldGet(parameter_,
+ nullptr,
+ DataType::Type::kInt64,
+ MemberOffset(10),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ 0);
loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
- HInstruction* set_field = new (&allocator_) HInstanceFieldSet(parameter_,
- get_field,
- nullptr,
- DataType::Type::kInt64,
- MemberOffset(10),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph_->GetDexFile(),
- 0);
+ HInstruction* set_field = new (GetAllocator()) HInstanceFieldSet(parameter_,
+ get_field,
+ nullptr,
+ DataType::Type::kInt64,
+ MemberOffset(10),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ 0);
loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
EXPECT_EQ(get_field->GetBlock(), loop_body_);
@@ -183,10 +179,10 @@
BuildLoop();
// Populate the loop with instructions: set/get array with different types.
- HInstruction* get_array = new (&allocator_) HArrayGet(
+ HInstruction* get_array = new (GetAllocator()) HArrayGet(
parameter_, int_constant_, DataType::Type::kInt32, 0);
loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction());
- HInstruction* set_array = new (&allocator_) HArraySet(
+ HInstruction* set_array = new (GetAllocator()) HArraySet(
parameter_, int_constant_, float_constant_, DataType::Type::kFloat32, 0);
loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction());
@@ -201,10 +197,10 @@
BuildLoop();
// Populate the loop with instructions: set/get array with same types.
- HInstruction* get_array = new (&allocator_) HArrayGet(
+ HInstruction* get_array = new (GetAllocator()) HArrayGet(
parameter_, int_constant_, DataType::Type::kFloat32, 0);
loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction());
- HInstruction* set_array = new (&allocator_) HArraySet(
+ HInstruction* set_array = new (GetAllocator()) HArraySet(
parameter_, get_array, float_constant_, DataType::Type::kFloat32, 0);
loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction());
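
The test rewrite above removes the per-test ArenaPool/ArenaAllocator members and routes every placement new through the fixture's GetAllocator(). The sketch below guesses at the shape of such a fixture using googletest; OptimizingUnitTestSketch and FakeArena are hypothetical names that only illustrate why the member list of LICMTest got shorter, not ART's real OptimizingUnitTest.

#include <gtest/gtest.h>
#include <memory>
#include <vector>

// Toy allocator owned by the fixture; stands in for the arena that
// OptimizingUnitTest would provide.
class FakeArena {
 public:
  int* NewInt(int v) {
    ints_.push_back(std::make_unique<int>(v));
    return ints_.back().get();
  }
 private:
  std::vector<std::unique_ptr<int>> ints_;
};

class OptimizingUnitTestSketch : public ::testing::Test {
 protected:
  FakeArena* GetAllocator() { return &arena_; }
 private:
  FakeArena arena_;  // lives for the duration of one test
};

class LicmLikeTest : public OptimizingUnitTestSketch {};

TEST_F(LicmLikeTest, AllocatesThroughFixture) {
  int* value = GetAllocator()->NewInt(42);  // replaces `new (&allocator_) ...`
  EXPECT_EQ(*value, 42);
}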
diff --git a/compiler/optimizing/linear_order.cc b/compiler/optimizing/linear_order.cc
index 80cecd4..58e00a8 100644
--- a/compiler/optimizing/linear_order.cc
+++ b/compiler/optimizing/linear_order.cc
@@ -16,6 +16,9 @@
#include "linear_order.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
+
namespace art {
static bool InSameLoop(HLoopInformation* first_loop, HLoopInformation* second_loop) {
@@ -34,7 +37,8 @@
}
// Helper method to update work list for linear order.
-static void AddToListForLinearization(ArenaVector<HBasicBlock*>* worklist, HBasicBlock* block) {
+static void AddToListForLinearization(ScopedArenaVector<HBasicBlock*>* worklist,
+ HBasicBlock* block) {
HLoopInformation* block_loop = block->GetLoopInformation();
auto insert_pos = worklist->rbegin(); // insert_pos.base() will be the actual position.
for (auto end = worklist->rend(); insert_pos != end; ++insert_pos) {
@@ -51,7 +55,7 @@
}
// Helper method to validate linear order.
-static bool IsLinearOrderWellFormed(const HGraph* graph, ArenaVector<HBasicBlock*>* linear_order) {
+static bool IsLinearOrderWellFormed(const HGraph* graph, ArrayRef<HBasicBlock*> linear_order) {
for (HBasicBlock* header : graph->GetBlocks()) {
if (header == nullptr || !header->IsLoopHeader()) {
continue;
@@ -59,7 +63,7 @@
HLoopInformation* loop = header->GetLoopInformation();
size_t num_blocks = loop->GetBlocks().NumSetBits();
size_t found_blocks = 0u;
- for (HBasicBlock* block : *linear_order) {
+ for (HBasicBlock* block : linear_order) {
if (loop->Contains(*block)) {
found_blocks++;
if (found_blocks == 1u && block != header) {
@@ -79,10 +83,8 @@
return true;
}
-void LinearizeGraph(const HGraph* graph,
- ArenaAllocator* allocator,
- ArenaVector<HBasicBlock*>* linear_order) {
- DCHECK(linear_order->empty());
+void LinearizeGraphInternal(const HGraph* graph, ArrayRef<HBasicBlock*> linear_order) {
+ DCHECK_EQ(linear_order.size(), graph->GetReversePostOrder().size());
// Create a reverse post ordering with the following properties:
// - Blocks in a loop are consecutive,
// - Back-edge is the last block before loop exits.
@@ -92,8 +94,9 @@
// current reverse post order in the graph, but it would require making
// order queries to a GrowableArray, which is not the best data structure
// for it.
- ArenaVector<uint32_t> forward_predecessors(graph->GetBlocks().size(),
- allocator->Adapter(kArenaAllocLinearOrder));
+ ScopedArenaAllocator allocator(graph->GetArenaStack());
+ ScopedArenaVector<uint32_t> forward_predecessors(graph->GetBlocks().size(),
+ allocator.Adapter(kArenaAllocLinearOrder));
for (HBasicBlock* block : graph->GetReversePostOrder()) {
size_t number_of_forward_predecessors = block->GetPredecessors().size();
if (block->IsLoopHeader()) {
@@ -105,13 +108,14 @@
// iterate over the successors. When all non-back edge predecessors of a
// successor block are visited, the successor block is added in the worklist
// following an order that satisfies the requirements to build our linear graph.
- linear_order->reserve(graph->GetReversePostOrder().size());
- ArenaVector<HBasicBlock*> worklist(allocator->Adapter(kArenaAllocLinearOrder));
+ ScopedArenaVector<HBasicBlock*> worklist(allocator.Adapter(kArenaAllocLinearOrder));
worklist.push_back(graph->GetEntryBlock());
+ size_t num_added = 0u;
do {
HBasicBlock* current = worklist.back();
worklist.pop_back();
- linear_order->push_back(current);
+ linear_order[num_added] = current;
+ ++num_added;
for (HBasicBlock* successor : current->GetSuccessors()) {
int block_id = successor->GetBlockId();
size_t number_of_remaining_predecessors = forward_predecessors[block_id];
@@ -121,6 +125,7 @@
forward_predecessors[block_id] = number_of_remaining_predecessors - 1;
}
} while (!worklist.empty());
+ DCHECK_EQ(num_added, linear_order.size());
DCHECK(graph->HasIrreducibleLoops() || IsLinearOrderWellFormed(graph, linear_order));
}
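
The rewritten LinearizeGraphInternal is, at its core, Kahn's topological sort driven by forward-predecessor counts, now using pass-local ScopedArenaVector storage instead of caller-provided ArenaVectors. A self-contained version of that worklist loop follows; the loop-placement heuristic from AddToListForLinearization is deliberately omitted, so this is plain Kahn's algorithm over a toy CFG.

#include <cstdio>
#include <vector>

int main() {
  // Adjacency list of a small CFG: 0 -> 1 -> {2, 3}, {2, 3} -> 4.
  std::vector<std::vector<int>> succ = {{1}, {2, 3}, {4}, {4}, {}};

  // Count each block's forward predecessors, as the pass does up front.
  std::vector<int> preds(succ.size(), 0);
  for (const auto& edges : succ)
    for (int s : edges) ++preds[s];

  std::vector<int> worklist = {0};  // entry block has no predecessors
  std::vector<int> linear_order;
  while (!worklist.empty()) {
    int current = worklist.back();
    worklist.pop_back();
    linear_order.push_back(current);
    // A successor becomes ready once its last forward predecessor is placed.
    for (int s : succ[current])
      if (--preds[s] == 0) worklist.push_back(s);
  }

  for (int b : linear_order) std::printf("%d ", b);  // prints: 0 1 3 2 4
  std::printf("\n");
}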
diff --git a/compiler/optimizing/linear_order.h b/compiler/optimizing/linear_order.h
index 7122d67..151db00 100644
--- a/compiler/optimizing/linear_order.h
+++ b/compiler/optimizing/linear_order.h
@@ -17,10 +17,14 @@
#ifndef ART_COMPILER_OPTIMIZING_LINEAR_ORDER_H_
#define ART_COMPILER_OPTIMIZING_LINEAR_ORDER_H_
+#include <type_traits>
+
#include "nodes.h"
namespace art {
+void LinearizeGraphInternal(const HGraph* graph, ArrayRef<HBasicBlock*> linear_order);
+
// Linearizes the 'graph' such that:
// (1): a block is always after its dominator,
// (2): blocks of loops are contiguous.
@@ -32,9 +36,15 @@
//
// for (HBasicBlock* block : ReverseRange(linear_order)) // linear post order
//
-void LinearizeGraph(const HGraph* graph,
- ArenaAllocator* allocator,
- ArenaVector<HBasicBlock*>* linear_order);
+template <typename Vector>
+void LinearizeGraph(const HGraph* graph, Vector* linear_order) {
+ static_assert(std::is_same<HBasicBlock*, typename Vector::value_type>::value,
+ "Vector::value_type must be HBasicBlock*.");
+ // Resize the vector and pass an ArrayRef<> to the internal implementation, which is
+ // shared for all kinds of vectors, e.g. ArenaVector<> or ScopedArenaVector<>.
+ linear_order->resize(graph->GetReversePostOrder().size());
+ LinearizeGraphInternal(graph, ArrayRef<HBasicBlock*>(*linear_order));
+}
} // namespace art
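
The resize-then-view idiom above is what lets one non-template implementation serve both ArenaVector<> and ScopedArenaVector<>: any contiguous container can be sized first and then handed off as a span. A standalone sketch with std::vector and a raw pointer standing in for ArrayRef (FillSquares and friends are illustrative, not ART APIs):

#include <cstddef>
#include <type_traits>
#include <vector>

// Non-template core: operates on pointer + size, so it is compiled once.
void FillSquaresInternal(int* data, std::size_t size) {
  for (std::size_t i = 0; i < size; ++i) {
    data[i] = static_cast<int>(i * i);
  }
}

// Thin template adapter: any contiguous vector-like container of int works.
template <typename Vector>
void FillSquares(Vector* v, std::size_t n) {
  static_assert(std::is_same<int, typename Vector::value_type>::value,
                "Vector::value_type must be int.");
  v->resize(n);  // size first, then hand a view to the shared core
  FillSquaresInternal(v->data(), v->size());
}

int main() {
  std::vector<int> v;
  FillSquares(&v, 5);  // v == {0, 1, 4, 9, 16}
}
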
diff --git a/compiler/optimizing/linearize_test.cc b/compiler/optimizing/linearize_test.cc
index 3831aa6..b2a9c0a 100644
--- a/compiler/optimizing/linearize_test.cc
+++ b/compiler/optimizing/linearize_test.cc
@@ -32,17 +32,20 @@
namespace art {
-class LinearizeTest : public CommonCompilerTest {};
+class LinearizeTest : public OptimizingUnitTest {
+ protected:
+ template <size_t number_of_blocks>
+ void TestCode(const uint16_t* data, const uint32_t (&expected_order)[number_of_blocks]);
+};
template <size_t number_of_blocks>
-static void TestCode(const uint16_t* data, const uint32_t (&expected_order)[number_of_blocks]) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+void LinearizeTest::TestCode(const uint16_t* data,
+ const uint32_t (&expected_order)[number_of_blocks]) {
+ HGraph* graph = CreateCFG(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
ASSERT_EQ(graph->GetLinearOrder().size(), number_of_blocks);
diff --git a/compiler/optimizing/live_interval_test.cc b/compiler/optimizing/live_interval_test.cc
index 405f261..c60386d 100644
--- a/compiler/optimizing/live_interval_test.cc
+++ b/compiler/optimizing/live_interval_test.cc
@@ -23,29 +23,29 @@
namespace art {
TEST(LiveInterval, GetStart) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaPoolAndAllocator pool;
+ ScopedArenaAllocator* allocator = pool.GetScopedAllocator();
{
static constexpr size_t ranges[][2] = {{0, 42}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
ASSERT_EQ(0u, interval->GetStart());
}
{
static constexpr size_t ranges[][2] = {{4, 12}, {14, 16}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
ASSERT_EQ(4u, interval->GetStart());
}
}
TEST(LiveInterval, IsDeadAt) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaPoolAndAllocator pool;
+ ScopedArenaAllocator* allocator = pool.GetScopedAllocator();
{
static constexpr size_t ranges[][2] = {{0, 42}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
ASSERT_TRUE(interval->IsDeadAt(42));
ASSERT_TRUE(interval->IsDeadAt(43));
ASSERT_FALSE(interval->IsDeadAt(41));
@@ -55,7 +55,7 @@
{
static constexpr size_t ranges[][2] = {{4, 12}, {14, 16}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
ASSERT_TRUE(interval->IsDeadAt(16));
ASSERT_TRUE(interval->IsDeadAt(32));
ASSERT_FALSE(interval->IsDeadAt(0));
@@ -68,12 +68,12 @@
}
TEST(LiveInterval, Covers) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaPoolAndAllocator pool;
+ ScopedArenaAllocator* allocator = pool.GetScopedAllocator();
{
static constexpr size_t ranges[][2] = {{0, 42}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
ASSERT_TRUE(interval->Covers(0));
ASSERT_TRUE(interval->Covers(4));
ASSERT_TRUE(interval->Covers(41));
@@ -83,7 +83,7 @@
{
static constexpr size_t ranges[][2] = {{4, 12}, {14, 16}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
ASSERT_FALSE(interval->Covers(0));
ASSERT_TRUE(interval->Covers(4));
ASSERT_TRUE(interval->Covers(11));
@@ -96,68 +96,68 @@
}
TEST(LiveInterval, FirstIntersectionWith) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaPoolAndAllocator pool;
+ ScopedArenaAllocator* allocator = pool.GetScopedAllocator();
{
static constexpr size_t ranges1[][2] = {{0, 4}, {8, 10}};
- LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+ LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator);
static constexpr size_t ranges2[][2] = {{5, 6}};
- LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+ LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator);
ASSERT_EQ(kNoLifetime, interval1->FirstIntersectionWith(interval2));
}
{
static constexpr size_t ranges1[][2] = {{0, 4}, {8, 10}};
- LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+ LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator);
static constexpr size_t ranges2[][2] = {{5, 42}};
- LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+ LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator);
ASSERT_EQ(8u, interval1->FirstIntersectionWith(interval2));
}
{
static constexpr size_t ranges1[][2] = {{0, 4}, {8, 10}};
- LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+ LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator);
static constexpr size_t ranges2[][2] = {{5, 6}, {7, 8}, {11, 12}};
- LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+ LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator);
ASSERT_EQ(kNoLifetime, interval1->FirstIntersectionWith(interval2));
}
{
static constexpr size_t ranges1[][2] = {{0, 4}, {8, 10}};
- LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+ LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator);
static constexpr size_t ranges2[][2] = {{5, 6}, {7, 8}, {9, 10}};
- LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+ LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator);
ASSERT_EQ(9u, interval1->FirstIntersectionWith(interval2));
}
{
static constexpr size_t ranges1[][2] = {{0, 1}, {2, 7}, {8, 10}};
- LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+ LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator);
static constexpr size_t ranges2[][2] = {{1, 2}, {6, 7}, {9, 10}};
- LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+ LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator);
ASSERT_EQ(6u, interval1->FirstIntersectionWith(interval2));
}
{
static constexpr size_t ranges1[][2] = {{0, 1}, {2, 8}, {55, 58}};
- LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+ LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator);
static constexpr size_t ranges2[][2] = {{1, 2}, {11, 42}, {43, 48}, {54, 56}};
- LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+ LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator);
ASSERT_EQ(55u, interval1->FirstIntersectionWith(interval2));
}
{
static constexpr size_t ranges1[][2] = {{0, 1}, {2, 8}, {15, 18}, {27, 32}, {41, 53}, {54, 60}};
- LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+ LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator);
static constexpr size_t ranges2[][2] = {{1, 2}, {11, 12}, {19, 25}, {34, 42}, {52, 60}};
- LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+ LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator);
ASSERT_EQ(41u, interval1->FirstIntersectionWith(interval2));
}
@@ -188,13 +188,13 @@
}
TEST(LiveInterval, SplitAt) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaPoolAndAllocator pool;
+ ScopedArenaAllocator* allocator = pool.GetScopedAllocator();
{
// Test within one range.
static constexpr size_t ranges[][2] = {{0, 4}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
LiveInterval* split = interval->SplitAt(1);
static constexpr size_t expected[][2] = {{0, 1}};
ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
@@ -205,7 +205,7 @@
{
// Test just before the end of one range.
static constexpr size_t ranges[][2] = {{0, 4}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
LiveInterval* split = interval->SplitAt(3);
static constexpr size_t expected[][2] = {{0, 3}};
ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
@@ -216,7 +216,7 @@
{
// Test within the first range.
static constexpr size_t ranges[][2] = {{0, 4}, {8, 12}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
LiveInterval* split = interval->SplitAt(1);
static constexpr size_t expected[][2] = {{0, 1}};
ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
@@ -227,7 +227,7 @@
{
// Test in a hole.
static constexpr size_t ranges[][2] = {{0, 4}, {8, 12}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
LiveInterval* split = interval->SplitAt(5);
static constexpr size_t expected[][2] = {{0, 4}};
ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
@@ -238,7 +238,7 @@
{
// Test within the second range.
static constexpr size_t ranges[][2] = {{0, 4}, {8, 12}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
LiveInterval* split = interval->SplitAt(9);
static constexpr size_t expected[][2] = {{0, 4}, {8, 9}};
ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
@@ -249,7 +249,7 @@
{
// Test at the beginning of the second range.
static constexpr size_t ranges[][2] = {{0, 4}, {6, 10}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
LiveInterval* split = interval->SplitAt(6);
static constexpr size_t expected[][2] = {{0, 4}};
ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
@@ -260,7 +260,7 @@
{
// Test at the end of the first range.
static constexpr size_t ranges[][2] = {{0, 4}, {6, 10}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
LiveInterval* split = interval->SplitAt(4);
static constexpr size_t expected[][2] = {{0, 4}};
ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
@@ -271,7 +271,7 @@
{
// Test that we get null if we split at a position where the interval is dead.
static constexpr size_t ranges[][2] = {{0, 4}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
LiveInterval* split = interval->SplitAt(5);
ASSERT_TRUE(split == nullptr);
ASSERT_TRUE(RangesEquals(interval, ranges, arraysize(ranges)));
@@ -279,13 +279,13 @@
}
TEST(LiveInterval, AddLoopRange) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaPoolAndAllocator pool;
+ ScopedArenaAllocator* allocator = pool.GetScopedAllocator();
{
// Test when only used in a loop.
static constexpr size_t ranges[][2] = {{0, 4}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
interval->AddLoopRange(0, 8);
LiveRange* range = interval->GetFirstRange();
ASSERT_TRUE(range->GetNext() == nullptr);
@@ -296,7 +296,7 @@
{
// Test when only used in a loop.
static constexpr size_t ranges[][2] = {{2, 4}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
interval->AddLoopRange(0, 8);
LiveRange* range = interval->GetFirstRange();
ASSERT_TRUE(range->GetNext() == nullptr);
@@ -307,7 +307,7 @@
{
// Test when used just after the loop.
static constexpr size_t ranges[][2] = {{2, 4}, {8, 10}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
interval->AddLoopRange(0, 8);
LiveRange* range = interval->GetFirstRange();
ASSERT_TRUE(range->GetNext() == nullptr);
@@ -318,7 +318,7 @@
{
// Test when the use after the loop is after a lifetime hole.
static constexpr size_t ranges[][2] = {{2, 4}, {10, 12}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
interval->AddLoopRange(0, 8);
LiveRange* range = interval->GetFirstRange();
ASSERT_EQ(range->GetStart(), 0u);
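
Each converted test now declares a single ArenaPoolAndAllocator and borrows its scoped allocator, instead of wiring an ArenaPool and an ArenaAllocator together by hand. A rough standard-C++17 analog of that bundle using std::pmr (PoolAndAllocator and its method are hypothetical stand-ins, not the ART types):

#include <memory_resource>
#include <vector>

// One object owns the backing storage and hands out an allocator view,
// so a test body needs a single declaration.
class PoolAndAllocator {
 public:
  std::pmr::memory_resource* GetScopedAllocator() { return &arena_; }
 private:
  std::pmr::monotonic_buffer_resource arena_;
};

int main() {
  PoolAndAllocator pool;
  std::pmr::memory_resource* allocator = pool.GetScopedAllocator();
  std::pmr::vector<int> ranges(allocator);  // test data lives in the pool's arena
  ranges.push_back(42);
}  // destroying `pool` releases everything the test allocated
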
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index f9a955f..ddcad5a 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -29,10 +29,13 @@
namespace art {
-class LiveRangesTest : public CommonCompilerTest {};
+class LiveRangesTest : public OptimizingUnitTest {
+ public:
+ HGraph* BuildGraph(const uint16_t* data);
+};
-static HGraph* BuildGraph(const uint16_t* data, ArenaAllocator* allocator) {
- HGraph* graph = CreateCFG(allocator, data);
+HGraph* LiveRangesTest::BuildGraph(const uint16_t* data) {
+ HGraph* graph = CreateCFG(data);
// The suspend checks implementation may change in the future, and this test relies
// on how instructions are ordered.
RemoveSuspendChecks(graph);
@@ -58,14 +61,12 @@
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = BuildGraph(data, &allocator);
+ HGraph* graph = BuildGraph(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
LiveInterval* interval = liveness.GetInstructionFromSsaIndex(0)->GetLiveInterval();
@@ -107,13 +108,11 @@
Instruction::GOTO | 0x100,
Instruction::RETURN | 0 << 8);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = BuildGraph(data, &allocator);
+ HGraph* graph = BuildGraph(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
LiveInterval* interval = liveness.GetInstructionFromSsaIndex(0)->GetLiveInterval();
@@ -158,13 +157,11 @@
Instruction::CONST_4 | 4 << 12 | 0,
Instruction::RETURN | 0 << 8);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = BuildGraph(data, &allocator);
+ HGraph* graph = BuildGraph(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Test for the 4 constant.
@@ -236,14 +233,12 @@
Instruction::CONST_4 | 5 << 12 | 1 << 8,
Instruction::RETURN | 1 << 8);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = BuildGraph(data, &allocator);
+ HGraph* graph = BuildGraph(data);
RemoveSuspendChecks(graph);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Test for the 0 constant.
@@ -316,13 +311,11 @@
Instruction::GOTO | 0xFB00,
Instruction::RETURN | 0 << 8);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = BuildGraph(data, &allocator);
+ HGraph* graph = BuildGraph(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Test for the 0 constant.
@@ -394,13 +387,11 @@
Instruction::ADD_INT, 1 << 8,
Instruction::RETURN);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = BuildGraph(data, &allocator);
+ HGraph* graph = BuildGraph(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Test for the 0 constant.
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index 37b58de..3eadc8f 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -29,7 +29,10 @@
namespace art {
-class LivenessTest : public CommonCompilerTest {};
+class LivenessTest : public OptimizingUnitTest {
+ protected:
+ void TestCode(const uint16_t* data, const char* expected);
+};
static void DumpBitVector(BitVector* vector,
std::ostream& buffer,
@@ -43,16 +46,14 @@
buffer << ")\n";
}
-static void TestCode(const uint16_t* data, const char* expected) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+void LivenessTest::TestCode(const uint16_t* data, const char* expected) {
+ HGraph* graph = CreateCFG(data);
// `Inline` conditions into ifs.
PrepareForRegisterAllocation(graph).Run();
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
std::ostringstream buffer;
diff --git a/compiler/optimizing/load_store_analysis.h b/compiler/optimizing/load_store_analysis.h
index d46b904..6a25da3 100644
--- a/compiler/optimizing/load_store_analysis.h
+++ b/compiler/optimizing/load_store_analysis.h
@@ -172,9 +172,9 @@
explicit HeapLocationCollector(HGraph* graph)
: HGraphVisitor(graph),
- ref_info_array_(graph->GetArena()->Adapter(kArenaAllocLSE)),
- heap_locations_(graph->GetArena()->Adapter(kArenaAllocLSE)),
- aliasing_matrix_(graph->GetArena(),
+ ref_info_array_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+ heap_locations_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+ aliasing_matrix_(graph->GetAllocator(),
kInitialAliasingMatrixBitVectorSize,
true,
kArenaAllocLSE),
@@ -362,7 +362,7 @@
ReferenceInfo* ref_info = FindReferenceInfoOf(instruction);
if (ref_info == nullptr) {
size_t pos = ref_info_array_.size();
- ref_info = new (GetGraph()->GetArena()) ReferenceInfo(instruction, pos);
+ ref_info = new (GetGraph()->GetAllocator()) ReferenceInfo(instruction, pos);
ref_info_array_.push_back(ref_info);
}
return ref_info;
@@ -385,7 +385,7 @@
size_t heap_location_idx = FindHeapLocationIndex(
ref_info, offset, index, declaring_class_def_index);
if (heap_location_idx == kHeapLocationNotFound) {
- HeapLocation* heap_loc = new (GetGraph()->GetArena())
+ HeapLocation* heap_loc = new (GetGraph()->GetAllocator())
HeapLocation(ref_info, offset, index, declaring_class_def_index);
heap_locations_.push_back(heap_loc);
return heap_loc;
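
`new (GetGraph()->GetAllocator()) ReferenceInfo(...)` is placement new routed through an arena: the class exposes an operator new that takes the allocator, objects are never deleted individually, and the arena releases everything at once. A self-contained sketch of the idiom (BumpAllocator and ReferenceInfoLike are invented for illustration):

#include <cstddef>
#include <cstdlib>
#include <new>

// Hypothetical bump allocator standing in for ART's ArenaAllocator.
class BumpAllocator {
 public:
  explicit BumpAllocator(std::size_t capacity)
      : buffer_(static_cast<char*>(std::malloc(capacity))), offset_(0) {}
  ~BumpAllocator() { std::free(buffer_); }  // one bulk release, no per-object frees
  void* Alloc(std::size_t bytes) {
    void* result = buffer_ + offset_;
    offset_ += (bytes + 7) & ~std::size_t{7};  // keep 8-byte alignment; no bounds check
    return result;
  }
 private:
  char* buffer_;
  std::size_t offset_;
};

struct ReferenceInfoLike {
  // Placement operator new routed through the arena, mirroring
  // `new (graph->GetAllocator()) ReferenceInfo(...)`.
  static void* operator new(std::size_t bytes, BumpAllocator* allocator) {
    return allocator->Alloc(bytes);
  }
  explicit ReferenceInfoLike(int pos) : position(pos) {}
  int position;
};

int main() {
  BumpAllocator allocator(1024);
  ReferenceInfoLike* info = new (&allocator) ReferenceInfoLike(3);
  (void)info;  // freed wholesale when `allocator` goes out of scope
}
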
diff --git a/compiler/optimizing/load_store_analysis_test.cc b/compiler/optimizing/load_store_analysis_test.cc
index 0df2f27..86696d0 100644
--- a/compiler/optimizing/load_store_analysis_test.cc
+++ b/compiler/optimizing/load_store_analysis_test.cc
@@ -22,19 +22,15 @@
namespace art {
-class LoadStoreAnalysisTest : public CommonCompilerTest {
+class LoadStoreAnalysisTest : public OptimizingUnitTest {
public:
- LoadStoreAnalysisTest() : pool_(), allocator_(&pool_) {
- graph_ = CreateGraph(&allocator_);
- }
+ LoadStoreAnalysisTest() : graph_(CreateGraph()) { }
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
};
TEST_F(LoadStoreAnalysisTest, ArrayHeapLocations) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
@@ -48,18 +44,19 @@
// array_get2 ArrayGet [array, c2]
// array_set1 ArraySet [array, c1, c3]
// array_set2 ArraySet [array, index, c3]
- HInstruction* array = new (&allocator_) HParameterValue(
+ HInstruction* array = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
- HInstruction* index = new (&allocator_) HParameterValue(
+ HInstruction* index = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32);
HInstruction* c1 = graph_->GetIntConstant(1);
HInstruction* c2 = graph_->GetIntConstant(2);
HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* array_get1 = new (&allocator_) HArrayGet(array, c1, DataType::Type::kInt32, 0);
- HInstruction* array_get2 = new (&allocator_) HArrayGet(array, c2, DataType::Type::kInt32, 0);
- HInstruction* array_set1 = new (&allocator_) HArraySet(array, c1, c3, DataType::Type::kInt32, 0);
+ HInstruction* array_get1 = new (GetAllocator()) HArrayGet(array, c1, DataType::Type::kInt32, 0);
+ HInstruction* array_get2 = new (GetAllocator()) HArrayGet(array, c2, DataType::Type::kInt32, 0);
+ HInstruction* array_set1 =
+ new (GetAllocator()) HArraySet(array, c1, c3, DataType::Type::kInt32, 0);
HInstruction* array_set2 =
- new (&allocator_) HArraySet(array, index, c3, DataType::Type::kInt32, 0);
+ new (GetAllocator()) HArraySet(array, index, c3, DataType::Type::kInt32, 0);
entry->AddInstruction(array);
entry->AddInstruction(index);
entry->AddInstruction(array_get1);
@@ -107,7 +104,7 @@
}
TEST_F(LoadStoreAnalysisTest, FieldHeapLocations) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
@@ -119,38 +116,38 @@
// get_field20 InstanceFieldGet [object, 20]
HInstruction* c1 = graph_->GetIntConstant(1);
- HInstruction* object = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kReference);
- HInstanceFieldSet* set_field10 = new (&allocator_) HInstanceFieldSet(object,
- c1,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(10),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph_->GetDexFile(),
- 0);
- HInstanceFieldGet* get_field10 = new (&allocator_) HInstanceFieldGet(object,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(10),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph_->GetDexFile(),
- 0);
- HInstanceFieldGet* get_field20 = new (&allocator_) HInstanceFieldGet(object,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(20),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph_->GetDexFile(),
- 0);
+ HInstruction* object = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kReference);
+ HInstanceFieldSet* set_field10 = new (GetAllocator()) HInstanceFieldSet(object,
+ c1,
+ nullptr,
+ DataType::Type::kInt32,
+ MemberOffset(10),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ 0);
+ HInstanceFieldGet* get_field10 = new (GetAllocator()) HInstanceFieldGet(object,
+ nullptr,
+ DataType::Type::kInt32,
+ MemberOffset(10),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ 0);
+ HInstanceFieldGet* get_field20 = new (GetAllocator()) HInstanceFieldGet(object,
+ nullptr,
+ DataType::Type::kInt32,
+ MemberOffset(20),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ 0);
entry->AddInstruction(object);
entry->AddInstruction(set_field10);
entry->AddInstruction(get_field10);
@@ -186,34 +183,38 @@
}
TEST_F(LoadStoreAnalysisTest, ArrayIndexAliasingTest) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
graph_->BuildDominatorTree();
- HInstruction* array = new (&allocator_) HParameterValue(
+ HInstruction* array = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
- HInstruction* index = new (&allocator_) HParameterValue(
+ HInstruction* index = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32);
HInstruction* c0 = graph_->GetIntConstant(0);
HInstruction* c1 = graph_->GetIntConstant(1);
HInstruction* c_neg1 = graph_->GetIntConstant(-1);
- HInstruction* add0 = new (&allocator_) HAdd(DataType::Type::kInt32, index, c0);
- HInstruction* add1 = new (&allocator_) HAdd(DataType::Type::kInt32, index, c1);
- HInstruction* sub0 = new (&allocator_) HSub(DataType::Type::kInt32, index, c0);
- HInstruction* sub1 = new (&allocator_) HSub(DataType::Type::kInt32, index, c1);
- HInstruction* sub_neg1 = new (&allocator_) HSub(DataType::Type::kInt32, index, c_neg1);
- HInstruction* rev_sub1 = new (&allocator_) HSub(DataType::Type::kInt32, c1, index);
- HInstruction* arr_set1 = new (&allocator_) HArraySet(array, c0, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set2 = new (&allocator_) HArraySet(array, c1, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set3 = new (&allocator_) HArraySet(array, add0, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set4 = new (&allocator_) HArraySet(array, add1, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set5 = new (&allocator_) HArraySet(array, sub0, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set6 = new (&allocator_) HArraySet(array, sub1, c0, DataType::Type::kInt32, 0);
+ HInstruction* add0 = new (GetAllocator()) HAdd(DataType::Type::kInt32, index, c0);
+ HInstruction* add1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, index, c1);
+ HInstruction* sub0 = new (GetAllocator()) HSub(DataType::Type::kInt32, index, c0);
+ HInstruction* sub1 = new (GetAllocator()) HSub(DataType::Type::kInt32, index, c1);
+ HInstruction* sub_neg1 = new (GetAllocator()) HSub(DataType::Type::kInt32, index, c_neg1);
+ HInstruction* rev_sub1 = new (GetAllocator()) HSub(DataType::Type::kInt32, c1, index);
+ HInstruction* arr_set1 = new (GetAllocator()) HArraySet(array, c0, c0, DataType::Type::kInt32, 0);
+ HInstruction* arr_set2 = new (GetAllocator()) HArraySet(array, c1, c0, DataType::Type::kInt32, 0);
+ HInstruction* arr_set3 =
+ new (GetAllocator()) HArraySet(array, add0, c0, DataType::Type::kInt32, 0);
+ HInstruction* arr_set4 =
+ new (GetAllocator()) HArraySet(array, add1, c0, DataType::Type::kInt32, 0);
+ HInstruction* arr_set5 =
+ new (GetAllocator()) HArraySet(array, sub0, c0, DataType::Type::kInt32, 0);
+ HInstruction* arr_set6 =
+ new (GetAllocator()) HArraySet(array, sub1, c0, DataType::Type::kInt32, 0);
HInstruction* arr_set7 =
- new (&allocator_) HArraySet(array, rev_sub1, c0, DataType::Type::kInt32, 0);
+ new (GetAllocator()) HArraySet(array, rev_sub1, c0, DataType::Type::kInt32, 0);
HInstruction* arr_set8 =
- new (&allocator_) HArraySet(array, sub_neg1, c0, DataType::Type::kInt32, 0);
+ new (GetAllocator()) HArraySet(array, sub_neg1, c0, DataType::Type::kInt32, 0);
entry->AddInstruction(array);
entry->AddInstruction(index);
@@ -272,14 +273,14 @@
}
TEST_F(LoadStoreAnalysisTest, ArrayIndexCalculationOverflowTest) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
graph_->BuildDominatorTree();
- HInstruction* array = new (&allocator_) HParameterValue(
+ HInstruction* array = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
- HInstruction* index = new (&allocator_) HParameterValue(
+ HInstruction* index = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32);
HInstruction* c0 = graph_->GetIntConstant(0);
@@ -290,40 +291,40 @@
HInstruction* c_0x80000001 = graph_->GetIntConstant(0x80000001);
// `index+0x80000000` and `index-0x80000000` array indices MAY alias.
- HInstruction* add_0x80000000 = new (&allocator_) HAdd(
+ HInstruction* add_0x80000000 = new (GetAllocator()) HAdd(
DataType::Type::kInt32, index, c_0x80000000);
- HInstruction* sub_0x80000000 = new (&allocator_) HSub(
+ HInstruction* sub_0x80000000 = new (GetAllocator()) HSub(
DataType::Type::kInt32, index, c_0x80000000);
- HInstruction* arr_set_1 = new (&allocator_) HArraySet(
+ HInstruction* arr_set_1 = new (GetAllocator()) HArraySet(
array, add_0x80000000, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set_2 = new (&allocator_) HArraySet(
+ HInstruction* arr_set_2 = new (GetAllocator()) HArraySet(
array, sub_0x80000000, c0, DataType::Type::kInt32, 0);
// `index+0x10` and `index-0xFFFFFFF0` array indices MAY alias.
- HInstruction* add_0x10 = new (&allocator_) HAdd(DataType::Type::kInt32, index, c_0x10);
- HInstruction* sub_0xFFFFFFF0 = new (&allocator_) HSub(
+ HInstruction* add_0x10 = new (GetAllocator()) HAdd(DataType::Type::kInt32, index, c_0x10);
+ HInstruction* sub_0xFFFFFFF0 = new (GetAllocator()) HSub(
DataType::Type::kInt32, index, c_0xFFFFFFF0);
- HInstruction* arr_set_3 = new (&allocator_) HArraySet(
+ HInstruction* arr_set_3 = new (GetAllocator()) HArraySet(
array, add_0x10, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set_4 = new (&allocator_) HArraySet(
+ HInstruction* arr_set_4 = new (GetAllocator()) HArraySet(
array, sub_0xFFFFFFF0, c0, DataType::Type::kInt32, 0);
// `index+0x7FFFFFFF` and `index-0x80000001` array indices MAY alias.
- HInstruction* add_0x7FFFFFFF = new (&allocator_) HAdd(
+ HInstruction* add_0x7FFFFFFF = new (GetAllocator()) HAdd(
DataType::Type::kInt32, index, c_0x7FFFFFFF);
- HInstruction* sub_0x80000001 = new (&allocator_) HSub(
+ HInstruction* sub_0x80000001 = new (GetAllocator()) HSub(
DataType::Type::kInt32, index, c_0x80000001);
- HInstruction* arr_set_5 = new (&allocator_) HArraySet(
+ HInstruction* arr_set_5 = new (GetAllocator()) HArraySet(
array, add_0x7FFFFFFF, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set_6 = new (&allocator_) HArraySet(
+ HInstruction* arr_set_6 = new (GetAllocator()) HArraySet(
array, sub_0x80000001, c0, DataType::Type::kInt32, 0);
// `index+0` and `index-0` array indices MAY alias.
- HInstruction* add_0 = new (&allocator_) HAdd(DataType::Type::kInt32, index, c0);
- HInstruction* sub_0 = new (&allocator_) HSub(DataType::Type::kInt32, index, c0);
- HInstruction* arr_set_7 = new (&allocator_) HArraySet(
+ HInstruction* add_0 = new (GetAllocator()) HAdd(DataType::Type::kInt32, index, c0);
+ HInstruction* sub_0 = new (GetAllocator()) HSub(DataType::Type::kInt32, index, c0);
+ HInstruction* arr_set_7 = new (GetAllocator()) HArraySet(
array, add_0, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set_8 = new (&allocator_) HArraySet(
+ HInstruction* arr_set_8 = new (GetAllocator()) HArraySet(
array, sub_0, c0, DataType::Type::kInt32, 0);
entry->AddInstruction(array);
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 54c2d43..39bfc86 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -49,13 +49,13 @@
ArenaVector<HInstruction*>(heap_locations_collector.
GetNumberOfHeapLocations(),
kUnknownHeapValue,
- graph->GetArena()->Adapter(kArenaAllocLSE)),
- graph->GetArena()->Adapter(kArenaAllocLSE)),
- removed_loads_(graph->GetArena()->Adapter(kArenaAllocLSE)),
- substitute_instructions_for_loads_(graph->GetArena()->Adapter(kArenaAllocLSE)),
- possibly_removed_stores_(graph->GetArena()->Adapter(kArenaAllocLSE)),
- singleton_new_instances_(graph->GetArena()->Adapter(kArenaAllocLSE)),
- singleton_new_arrays_(graph->GetArena()->Adapter(kArenaAllocLSE)) {
+ graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+ graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+ removed_loads_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+ substitute_instructions_for_loads_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+ possibly_removed_stores_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+ singleton_new_instances_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+ singleton_new_arrays_(graph->GetAllocator()->Adapter(kArenaAllocLSE)) {
}
void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
diff --git a/compiler/optimizing/locations.cc b/compiler/optimizing/locations.cc
index 40fe35b..5879c6f 100644
--- a/compiler/optimizing/locations.cc
+++ b/compiler/optimizing/locations.cc
@@ -28,10 +28,10 @@
LocationSummary::LocationSummary(HInstruction* instruction,
CallKind call_kind,
- bool intrinsified)
- : inputs_(instruction->InputCount(),
- instruction->GetBlock()->GetGraph()->GetArena()->Adapter(kArenaAllocLocationSummary)),
- temps_(instruction->GetBlock()->GetGraph()->GetArena()->Adapter(kArenaAllocLocationSummary)),
+ bool intrinsified,
+ ArenaAllocator* allocator)
+ : inputs_(instruction->InputCount(), allocator->Adapter(kArenaAllocLocationSummary)),
+ temps_(allocator->Adapter(kArenaAllocLocationSummary)),
call_kind_(call_kind),
intrinsified_(intrinsified),
has_custom_slow_path_calling_convention_(false),
@@ -43,11 +43,17 @@
instruction->SetLocations(this);
if (NeedsSafepoint()) {
- ArenaAllocator* arena = instruction->GetBlock()->GetGraph()->GetArena();
- stack_mask_ = ArenaBitVector::Create(arena, 0, true, kArenaAllocLocationSummary);
+ stack_mask_ = ArenaBitVector::Create(allocator, 0, true, kArenaAllocLocationSummary);
}
}
+LocationSummary::LocationSummary(HInstruction* instruction,
+ CallKind call_kind,
+ bool intrinsified)
+ : LocationSummary(instruction,
+ call_kind,
+ intrinsified,
+ instruction->GetBlock()->GetGraph()->GetAllocator()) {}
Location Location::RegisterOrConstant(HInstruction* instruction) {
return instruction->IsConstant()
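
The new private four-argument LocationSummary constructor does the work, and the original three-argument signature now simply delegates to it, so no existing call site changes. The shape of that delegating-constructor refactoring, reduced to a toy (Summary and its fields are hypothetical):

#include <cstddef>

class Summary {
 public:
  // Original public signature, now a thin wrapper: it derives the extra
  // dependency itself, like LocationSummary fetching the graph's allocator.
  Summary(int inputs, bool intrinsified)
      : Summary(inputs, intrinsified, /*capacity=*/ std::size_t{16}) {}

 private:
  // New constructor taking the dependency explicitly.
  Summary(int inputs, bool intrinsified, std::size_t capacity)
      : inputs_(inputs), intrinsified_(intrinsified), capacity_(capacity) {}

  int inputs_;
  bool intrinsified_;
  std::size_t capacity_;
};

int main() {
  Summary s(2, /*intrinsified=*/ false);  // resolves via the delegating form
  (void)s;
}
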
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 6f0dbce..d56c151 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -665,6 +665,11 @@
}
private:
+ LocationSummary(HInstruction* instruction,
+ CallKind call_kind,
+ bool intrinsified,
+ ArenaAllocator* allocator);
+
ArenaVector<Location> inputs_;
ArenaVector<Location> temps_;
const CallKind call_kind_;
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index c51fafa..d87861b 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -429,7 +429,7 @@
// Check that instructions from the induction sets are fully removed: they have no uses
// and no other instructions use them.
-static bool CheckInductionSetFullyRemoved(ArenaSet<HInstruction*>* iset) {
+static bool CheckInductionSetFullyRemoved(ScopedArenaSet<HInstruction*>* iset) {
for (HInstruction* instr : *iset) {
if (instr->GetBlock() != nullptr ||
!instr->GetUses().empty() ||
@@ -453,7 +453,7 @@
compiler_driver_(compiler_driver),
induction_range_(induction_analysis),
loop_allocator_(nullptr),
- global_allocator_(graph_->GetArena()),
+ global_allocator_(graph_->GetAllocator()),
top_loop_(nullptr),
last_loop_(nullptr),
iset_(nullptr),
@@ -465,7 +465,12 @@
vector_runtime_test_a_(nullptr),
vector_runtime_test_b_(nullptr),
vector_map_(nullptr),
- vector_permanent_map_(nullptr) {
+ vector_permanent_map_(nullptr),
+ vector_mode_(kSequential),
+ vector_preheader_(nullptr),
+ vector_header_(nullptr),
+ vector_body_(nullptr),
+ vector_index_(nullptr) {
}
void HLoopOptimization::Run() {
@@ -475,10 +480,8 @@
return;
}
- // Phase-local allocator that draws from the global pool. Since the allocator
- // itself resides on the stack, it is destructed on exiting Run(), which
- // implies its underlying memory is released immediately.
- ArenaAllocator allocator(global_allocator_->GetArenaPool());
+ // Phase-local allocator.
+ ScopedArenaAllocator allocator(graph_->GetArenaStack());
loop_allocator_ = &allocator;
// Perform loop optimizations.
@@ -499,8 +502,8 @@
void HLoopOptimization::LocalRun() {
// Build the linear order using the phase-local allocator. This step enables building
// a loop hierarchy that properly reflects the outer-inner and previous-next relation.
- ArenaVector<HBasicBlock*> linear_order(loop_allocator_->Adapter(kArenaAllocLinearOrder));
- LinearizeGraph(graph_, loop_allocator_, &linear_order);
+ ScopedArenaVector<HBasicBlock*> linear_order(loop_allocator_->Adapter(kArenaAllocLinearOrder));
+ LinearizeGraph(graph_, &linear_order);
// Build the loop hierarchy.
for (HBasicBlock* block : linear_order) {
@@ -513,13 +516,13 @@
// temporary data structures using the phase-local allocator. All new HIR
// should use the global allocator.
if (top_loop_ != nullptr) {
- ArenaSet<HInstruction*> iset(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
- ArenaSafeMap<HInstruction*, HInstruction*> reds(
+ ScopedArenaSet<HInstruction*> iset(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
+ ScopedArenaSafeMap<HInstruction*, HInstruction*> reds(
std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
- ArenaSet<ArrayReference> refs(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
- ArenaSafeMap<HInstruction*, HInstruction*> map(
+ ScopedArenaSet<ArrayReference> refs(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
+ ScopedArenaSafeMap<HInstruction*, HInstruction*> map(
std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
- ArenaSafeMap<HInstruction*, HInstruction*> perm(
+ ScopedArenaSafeMap<HInstruction*, HInstruction*> perm(
std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
// Attach.
iset_ = &iset;
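
The recurring pattern in this hunk: scratch containers are allocated from a stack-scoped arena, the pass works through pointers to them, and scope exit releases the memory in one shot. std::pmr::monotonic_buffer_resource gives the same shape in standard C++17 (a sketch under that analogy, not ART's ScopedArenaAllocator):

#include <memory_resource>
#include <set>

void RunPhase() {
  // Phase-local arena; destroyed (and all memory released) on return,
  // like the ScopedArenaAllocator declared at the top of Run().
  std::pmr::monotonic_buffer_resource arena;
  std::pmr::set<int> iset(&arena);  // scratch set drawing from the arena
  iset.insert(42);
  // ... phase body uses iset via a pointer, then detaches it ...
}  // no per-container frees needed
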
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 6e6e387..b1b3d11 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -17,6 +17,8 @@
#ifndef ART_COMPILER_OPTIMIZING_LOOP_OPTIMIZATION_H_
#define ART_COMPILER_OPTIMIZING_LOOP_OPTIMIZATION_H_
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "induction_var_range.h"
#include "nodes.h"
#include "optimization.h"
@@ -220,7 +222,7 @@
// Phase-local heap memory allocator for the loop optimizer. Storage obtained
// through this allocator is immediately released when the loop optimizer is done.
- ArenaAllocator* loop_allocator_;
+ ScopedArenaAllocator* loop_allocator_;
// Global heap memory allocator. Used to build HIR.
ArenaAllocator* global_allocator_;
@@ -232,14 +234,14 @@
// Temporary bookkeeping of a set of instructions.
// Contents reside in phase-local heap memory.
- ArenaSet<HInstruction*>* iset_;
+ ScopedArenaSet<HInstruction*>* iset_;
// Temporary bookkeeping of reduction instructions. Mapping is two-fold:
// (1) reductions in the loop-body are mapped back to their phi definition,
// (2) phi definitions are mapped to their initial value (updated during
// code generation to feed the proper values into the new chain).
// Contents reside in phase-local heap memory.
- ArenaSafeMap<HInstruction*, HInstruction*>* reductions_;
+ ScopedArenaSafeMap<HInstruction*, HInstruction*>* reductions_;
// Flag that tracks if any simplifications have occurred.
bool simplified_;
@@ -249,7 +251,7 @@
// Set of array references in the vector loop.
// Contents reside in phase-local heap memory.
- ArenaSet<ArrayReference>* vector_refs_;
+ ScopedArenaSet<ArrayReference>* vector_refs_;
// Dynamic loop peeling candidate for alignment.
const ArrayReference* vector_peeling_candidate_;
@@ -262,11 +264,11 @@
// loop (mode is kSequential) and the actual vector loop (mode is kVector). The data
// structure maps original instructions into the new instructions.
// Contents reside in phase-local heap memory.
- ArenaSafeMap<HInstruction*, HInstruction*>* vector_map_;
+ ScopedArenaSafeMap<HInstruction*, HInstruction*>* vector_map_;
// Permanent mapping used during vectorization synthesis.
// Contents reside in phase-local heap memory.
- ArenaSafeMap<HInstruction*, HInstruction*>* vector_permanent_map_;
+ ScopedArenaSafeMap<HInstruction*, HInstruction*>* vector_permanent_map_;
// Temporary vectorization bookkeeping.
VectorMode vector_mode_; // synthesis mode
diff --git a/compiler/optimizing/loop_optimization_test.cc b/compiler/optimizing/loop_optimization_test.cc
index 95718ae..4e1857d 100644
--- a/compiler/optimizing/loop_optimization_test.cc
+++ b/compiler/optimizing/loop_optimization_test.cc
@@ -24,14 +24,12 @@
* constructing the loop hierarchy. Actual optimizations are tested
* through the checker tests.
*/
-class LoopOptimizationTest : public CommonCompilerTest {
+class LoopOptimizationTest : public OptimizingUnitTest {
public:
LoopOptimizationTest()
- : pool_(),
- allocator_(&pool_),
- graph_(CreateGraph(&allocator_)),
- iva_(new (&allocator_) HInductionVarAnalysis(graph_)),
- loop_opt_(new (&allocator_) HLoopOptimization(graph_, nullptr, iva_, nullptr)) {
+ : graph_(CreateGraph()),
+ iva_(new (GetAllocator()) HInductionVarAnalysis(graph_)),
+ loop_opt_(new (GetAllocator()) HLoopOptimization(graph_, nullptr, iva_, nullptr)) {
BuildGraph();
}
@@ -40,38 +38,38 @@
/** Constructs bare minimum graph. */
void BuildGraph() {
graph_->SetNumberOfVRegs(1);
- entry_block_ = new (&allocator_) HBasicBlock(graph_);
- return_block_ = new (&allocator_) HBasicBlock(graph_);
- exit_block_ = new (&allocator_) HBasicBlock(graph_);
+ entry_block_ = new (GetAllocator()) HBasicBlock(graph_);
+ return_block_ = new (GetAllocator()) HBasicBlock(graph_);
+ exit_block_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry_block_);
graph_->AddBlock(return_block_);
graph_->AddBlock(exit_block_);
graph_->SetEntryBlock(entry_block_);
graph_->SetExitBlock(exit_block_);
- parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kInt32);
+ parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kInt32);
entry_block_->AddInstruction(parameter_);
- return_block_->AddInstruction(new (&allocator_) HReturnVoid());
- exit_block_->AddInstruction(new (&allocator_) HExit());
+ return_block_->AddInstruction(new (GetAllocator()) HReturnVoid());
+ exit_block_->AddInstruction(new (GetAllocator()) HExit());
entry_block_->AddSuccessor(return_block_);
return_block_->AddSuccessor(exit_block_);
}
/** Adds a loop nest at given position before successor. */
HBasicBlock* AddLoop(HBasicBlock* position, HBasicBlock* successor) {
- HBasicBlock* header = new (&allocator_) HBasicBlock(graph_);
- HBasicBlock* body = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* header = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* body = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(header);
graph_->AddBlock(body);
// Control flow.
position->ReplaceSuccessor(successor, header);
header->AddSuccessor(body);
header->AddSuccessor(successor);
- header->AddInstruction(new (&allocator_) HIf(parameter_));
+ header->AddInstruction(new (GetAllocator()) HIf(parameter_));
body->AddSuccessor(header);
- body->AddInstruction(new (&allocator_) HGoto());
+ body->AddInstruction(new (GetAllocator()) HGoto());
return header;
}
@@ -80,7 +78,8 @@
graph_->BuildDominatorTree();
iva_->Run();
// Do not release the loop hierarchy.
- loop_opt_->loop_allocator_ = &allocator_;
+ ScopedArenaAllocator loop_allocator(GetArenaStack());
+ loop_opt_->loop_allocator_ = &loop_allocator;
loop_opt_->LocalRun();
}
@@ -101,8 +100,6 @@
}
// General building fields.
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
HInductionVarAnalysis* iva_;
HLoopOptimization* loop_opt_;
@@ -199,8 +196,8 @@
// predecessors.
TEST_F(LoopOptimizationTest, SimplifyLoop) {
// Can't use AddLoop as we want a special order for the block's predecessors.
- HBasicBlock* header = new (&allocator_) HBasicBlock(graph_);
- HBasicBlock* body = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* header = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* body = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(header);
graph_->AddBlock(body);
@@ -213,11 +210,11 @@
DCHECK(header->GetSuccessors()[1] == return_block_);
// Data flow.
- header->AddInstruction(new (&allocator_) HIf(parameter_));
- body->AddInstruction(new (&allocator_) HGoto());
+ header->AddInstruction(new (GetAllocator()) HIf(parameter_));
+ body->AddInstruction(new (GetAllocator()) HGoto());
- HPhi* phi = new (&allocator_) HPhi(&allocator_, 0, 0, DataType::Type::kInt32);
- HInstruction* add = new (&allocator_) HAdd(DataType::Type::kInt32, phi, parameter_);
+ HPhi* phi = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
+ HInstruction* add = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi, parameter_);
header->AddPhi(phi);
body->AddInstruction(add);
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index cae5054..1a537ca 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -56,13 +56,13 @@
DCHECK_EQ(visited->GetHighestBitSet(), -1);
// Nodes that we're currently visiting, indexed by block id.
- ArenaBitVector visiting(arena_, blocks_.size(), false, kArenaAllocGraphBuilder);
+ ArenaBitVector visiting(allocator_, blocks_.size(), false, kArenaAllocGraphBuilder);
// Number of successors visited from a given node, indexed by block id.
ArenaVector<size_t> successors_visited(blocks_.size(),
0u,
- arena_->Adapter(kArenaAllocGraphBuilder));
+ allocator_->Adapter(kArenaAllocGraphBuilder));
// Stack of nodes that we're currently visiting (same as marked in "visiting" above).
- ArenaVector<HBasicBlock*> worklist(arena_->Adapter(kArenaAllocGraphBuilder));
+ ArenaVector<HBasicBlock*> worklist(allocator_->Adapter(kArenaAllocGraphBuilder));
constexpr size_t kDefaultWorklistSize = 8;
worklist.reserve(kDefaultWorklistSize);
visited->SetBit(entry_block_->GetBlockId());
@@ -173,7 +173,7 @@
}
GraphAnalysisResult HGraph::BuildDominatorTree() {
- ArenaBitVector visited(arena_, blocks_.size(), false, kArenaAllocGraphBuilder);
+ ArenaBitVector visited(allocator_, blocks_.size(), false, kArenaAllocGraphBuilder);
// (1) Find the back edges in the graph doing a DFS traversal.
FindBackEdges(&visited);
@@ -259,13 +259,13 @@
reverse_post_order_.push_back(entry_block_);
// Number of visits of a given node, indexed by block id.
- ArenaVector<size_t> visits(blocks_.size(), 0u, arena_->Adapter(kArenaAllocGraphBuilder));
+ ArenaVector<size_t> visits(blocks_.size(), 0u, allocator_->Adapter(kArenaAllocGraphBuilder));
// Number of successors visited from a given node, indexed by block id.
ArenaVector<size_t> successors_visited(blocks_.size(),
0u,
- arena_->Adapter(kArenaAllocGraphBuilder));
+ allocator_->Adapter(kArenaAllocGraphBuilder));
// Nodes for which we need to visit successors.
- ArenaVector<HBasicBlock*> worklist(arena_->Adapter(kArenaAllocGraphBuilder));
+ ArenaVector<HBasicBlock*> worklist(allocator_->Adapter(kArenaAllocGraphBuilder));
constexpr size_t kDefaultWorklistSize = 8;
worklist.reserve(kDefaultWorklistSize);
worklist.push_back(entry_block_);
@@ -335,7 +335,7 @@
}
HBasicBlock* HGraph::SplitEdge(HBasicBlock* block, HBasicBlock* successor) {
- HBasicBlock* new_block = new (arena_) HBasicBlock(this, successor->GetDexPc());
+ HBasicBlock* new_block = new (allocator_) HBasicBlock(this, successor->GetDexPc());
AddBlock(new_block);
// Use `InsertBetween` to ensure the predecessor index and successor index of
// `block` and `successor` are preserved.
@@ -347,7 +347,7 @@
// Insert a new node between `block` and `successor` to split the
// critical edge.
HBasicBlock* new_block = SplitEdge(block, successor);
- new_block->AddInstruction(new (arena_) HGoto(successor->GetDexPc()));
+ new_block->AddInstruction(new (allocator_) HGoto(successor->GetDexPc()));
if (successor->IsLoopHeader()) {
// If we split at a back edge boundary, make the new block the back edge.
HLoopInformation* info = successor->GetLoopInformation();
@@ -396,9 +396,9 @@
// this graph.
size_t number_of_incomings = header->GetPredecessors().size() - info->NumberOfBackEdges();
if (number_of_incomings != 1 || (GetEntryBlock()->GetSingleSuccessor() == header)) {
- HBasicBlock* pre_header = new (arena_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* pre_header = new (allocator_) HBasicBlock(this, header->GetDexPc());
AddBlock(pre_header);
- pre_header->AddInstruction(new (arena_) HGoto(header->GetDexPc()));
+ pre_header->AddInstruction(new (allocator_) HGoto(header->GetDexPc()));
for (size_t pred = 0; pred < header->GetPredecessors().size(); ++pred) {
HBasicBlock* predecessor = header->GetPredecessors()[pred];
@@ -440,7 +440,7 @@
try_entry != &block->GetTryCatchInformation()->GetTryEntry())) {
// We are either setting try block membership for the first time or it
// has changed.
- block->SetTryCatchInformation(new (arena_) TryCatchInformation(*try_entry));
+ block->SetTryCatchInformation(new (allocator_) TryCatchInformation(*try_entry));
}
}
}
@@ -547,7 +547,7 @@
// not null and not in a block. Otherwise, we need to clear the instruction
// id and/or any invariants the graph is assuming when adding new instructions.
if ((cached_null_constant_ == nullptr) || (cached_null_constant_->GetBlock() == nullptr)) {
- cached_null_constant_ = new (arena_) HNullConstant(dex_pc);
+ cached_null_constant_ = new (allocator_) HNullConstant(dex_pc);
cached_null_constant_->SetReferenceTypeInfo(inexact_object_rti_);
InsertConstant(cached_null_constant_);
}
@@ -563,7 +563,7 @@
// not null and not in a block. Otherwise, we need to clear the instruction
// id and/or any invariants the graph is assuming when adding new instructions.
if ((cached_current_method_ == nullptr) || (cached_current_method_->GetBlock() == nullptr)) {
- cached_current_method_ = new (arena_) HCurrentMethod(
+ cached_current_method_ = new (allocator_) HCurrentMethod(
Is64BitInstructionSet(instruction_set_) ? DataType::Type::kInt64 : DataType::Type::kInt32,
entry_block_->GetDexPc());
if (entry_block_->GetFirstInstruction() == nullptr) {
@@ -710,7 +710,7 @@
bool is_irreducible_loop = HasBackEdgeNotDominatedByHeader();
if (is_irreducible_loop) {
- ArenaBitVector visited(graph->GetArena(),
+ ArenaBitVector visited(graph->GetAllocator(),
graph->GetBlocks().size(),
/* expandable */ false,
kArenaAllocGraphBuilder);
@@ -1655,8 +1655,8 @@
DCHECK(!graph_->IsInSsaForm()) << "Support for SSA form not implemented.";
DCHECK_EQ(cursor->GetBlock(), this);
- HBasicBlock* new_block = new (GetGraph()->GetArena()) HBasicBlock(GetGraph(),
- cursor->GetDexPc());
+ HBasicBlock* new_block =
+ new (GetGraph()->GetAllocator()) HBasicBlock(GetGraph(), cursor->GetDexPc());
new_block->instructions_.first_instruction_ = cursor;
new_block->instructions_.last_instruction_ = instructions_.last_instruction_;
instructions_.last_instruction_ = cursor->previous_;
@@ -1668,7 +1668,7 @@
}
new_block->instructions_.SetBlockOfInstructions(new_block);
- AddInstruction(new (GetGraph()->GetArena()) HGoto(new_block->GetDexPc()));
+ AddInstruction(new (GetGraph()->GetAllocator()) HGoto(new_block->GetDexPc()));
for (HBasicBlock* successor : GetSuccessors()) {
successor->predecessors_[successor->GetPredecessorIndexOf(this)] = new_block;
@@ -1685,7 +1685,7 @@
DCHECK(!graph_->IsInSsaForm()) << "Support for SSA form not implemented.";
DCHECK(!IsCatchBlock()) << "Support for updating try/catch information not implemented.";
- HBasicBlock* new_block = new (GetGraph()->GetArena()) HBasicBlock(GetGraph(), GetDexPc());
+ HBasicBlock* new_block = new (GetGraph()->GetAllocator()) HBasicBlock(GetGraph(), GetDexPc());
for (HBasicBlock* predecessor : GetPredecessors()) {
predecessor->successors_[predecessor->GetSuccessorIndexOf(this)] = new_block;
@@ -1701,8 +1701,8 @@
HBasicBlock* HBasicBlock::SplitBeforeForInlining(HInstruction* cursor) {
DCHECK_EQ(cursor->GetBlock(), this);
- HBasicBlock* new_block = new (GetGraph()->GetArena()) HBasicBlock(GetGraph(),
- cursor->GetDexPc());
+ HBasicBlock* new_block =
+ new (GetGraph()->GetAllocator()) HBasicBlock(GetGraph(), cursor->GetDexPc());
new_block->instructions_.first_instruction_ = cursor;
new_block->instructions_.last_instruction_ = instructions_.last_instruction_;
instructions_.last_instruction_ = cursor->previous_;
@@ -1734,7 +1734,7 @@
DCHECK_NE(instructions_.last_instruction_, cursor);
DCHECK_EQ(cursor->GetBlock(), this);
- HBasicBlock* new_block = new (GetGraph()->GetArena()) HBasicBlock(GetGraph(), GetDexPc());
+ HBasicBlock* new_block = new (GetGraph()->GetAllocator()) HBasicBlock(GetGraph(), GetDexPc());
new_block->instructions_.first_instruction_ = cursor->GetNext();
new_block->instructions_.last_instruction_ = instructions_.last_instruction_;
cursor->next_->previous_ = nullptr;
@@ -2030,7 +2030,7 @@
last_instruction->IsPackedSwitch() ||
(last_instruction->IsTryBoundary() && IsCatchBlock()));
predecessor->RemoveInstruction(last_instruction);
- predecessor->AddInstruction(new (graph_->GetArena()) HGoto(last_instruction->GetDexPc()));
+ predecessor->AddInstruction(new (graph_->GetAllocator()) HGoto(last_instruction->GetDexPc()));
} else if (num_pred_successors == 0u) {
// The predecessor has no remaining successors and therefore must be dead.
// We deliberately leave it without a control-flow instruction so that the
@@ -2241,7 +2241,7 @@
if (current->NeedsEnvironment()) {
DCHECK(current->HasEnvironment());
current->GetEnvironment()->SetAndCopyParentChain(
- outer_graph->GetArena(), invoke->GetEnvironment());
+ outer_graph->GetAllocator(), invoke->GetEnvironment());
}
}
}
@@ -2294,7 +2294,7 @@
// into two blocks, merge the first block of the inlined graph into
// the first half, and replace the exit block of the inlined graph
// with the second half.
- ArenaAllocator* allocator = outer_graph->GetArena();
+ ArenaAllocator* allocator = outer_graph->GetAllocator();
HBasicBlock* at = invoke->GetBlock();
// Note that we split before the invoke only to simplify polymorphic inlining.
HBasicBlock* to = at->SplitBeforeForInlining(invoke);
@@ -2478,10 +2478,10 @@
HBasicBlock* old_pre_header = header->GetDominator();
// Need extra block to avoid critical edge.
- HBasicBlock* if_block = new (arena_) HBasicBlock(this, header->GetDexPc());
- HBasicBlock* true_block = new (arena_) HBasicBlock(this, header->GetDexPc());
- HBasicBlock* false_block = new (arena_) HBasicBlock(this, header->GetDexPc());
- HBasicBlock* new_pre_header = new (arena_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* if_block = new (allocator_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* true_block = new (allocator_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* false_block = new (allocator_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* new_pre_header = new (allocator_) HBasicBlock(this, header->GetDexPc());
AddBlock(if_block);
AddBlock(true_block);
AddBlock(false_block);
@@ -2536,9 +2536,9 @@
HLoopInformation* loop = header->GetLoopInformation();
// Add new loop blocks.
- HBasicBlock* new_pre_header = new (arena_) HBasicBlock(this, header->GetDexPc());
- HBasicBlock* new_header = new (arena_) HBasicBlock(this, header->GetDexPc());
- HBasicBlock* new_body = new (arena_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* new_pre_header = new (allocator_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* new_header = new (allocator_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* new_body = new (allocator_) HBasicBlock(this, header->GetDexPc());
AddBlock(new_pre_header);
AddBlock(new_header);
AddBlock(new_body);
@@ -2570,10 +2570,10 @@
reverse_post_order_[index_of_body] = new_body;
// Add gotos and suspend check (client must add conditional in header).
- new_pre_header->AddInstruction(new (arena_) HGoto());
- HSuspendCheck* suspend_check = new (arena_) HSuspendCheck(header->GetDexPc());
+ new_pre_header->AddInstruction(new (allocator_) HGoto());
+ HSuspendCheck* suspend_check = new (allocator_) HSuspendCheck(header->GetDexPc());
new_header->AddInstruction(suspend_check);
- new_body->AddInstruction(new (arena_) HGoto());
+ new_body->AddInstruction(new (allocator_) HGoto());
suspend_check->CopyEnvironmentFromWithLoopPhiAdjustment(
loop->GetSuspendCheck()->GetEnvironment(), header);
@@ -2891,7 +2891,7 @@
// Returns an instruction with the opposite Boolean value from 'cond'.
HInstruction* HGraph::InsertOppositeCondition(HInstruction* cond, HInstruction* cursor) {
- ArenaAllocator* allocator = GetArena();
+ ArenaAllocator* allocator = GetAllocator();
if (cond->IsCondition() &&
!DataType::IsFloatingPointType(cond->InputAt(0)->GetType())) {
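The nodes.cc side of the change is mechanical: every GetArena() call becomes
GetAllocator(), and HIR nodes are still placement-new'd on the graph's arena
allocator. A minimal sketch of the resulting allocation idiom, assuming the
usual ART HIR types (the helper name is illustrative):

// Sketch: the post-rename allocation pattern used throughout nodes.cc.
void AppendGoto(HGraph* graph, HBasicBlock* block, uint32_t dex_pc) {
  ArenaAllocator* allocator = graph->GetAllocator();  // formerly graph->GetArena()
  block->AddInstruction(new (allocator) HGoto(dex_pc));
}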
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index fef0c86..99fde75 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -45,6 +45,7 @@
namespace art {
+class ArenaStack;
class GraphChecker;
class HBasicBlock;
class HConstructorFence;
@@ -305,7 +306,8 @@
// Control-flow graph of a method. Contains a list of basic blocks.
class HGraph : public ArenaObject<kArenaAllocGraph> {
public:
- HGraph(ArenaAllocator* arena,
+ HGraph(ArenaAllocator* allocator,
+ ArenaStack* arena_stack,
const DexFile& dex_file,
uint32_t method_idx,
InstructionSet instruction_set,
@@ -313,10 +315,11 @@
bool debuggable = false,
bool osr = false,
int start_instruction_id = 0)
- : arena_(arena),
- blocks_(arena->Adapter(kArenaAllocBlockList)),
- reverse_post_order_(arena->Adapter(kArenaAllocReversePostOrder)),
- linear_order_(arena->Adapter(kArenaAllocLinearOrder)),
+ : allocator_(allocator),
+ arena_stack_(arena_stack),
+ blocks_(allocator->Adapter(kArenaAllocBlockList)),
+ reverse_post_order_(allocator->Adapter(kArenaAllocReversePostOrder)),
+ linear_order_(allocator->Adapter(kArenaAllocLinearOrder)),
entry_block_(nullptr),
exit_block_(nullptr),
maximum_number_of_out_vregs_(0),
@@ -337,22 +340,23 @@
number_of_cha_guards_(0),
instruction_set_(instruction_set),
cached_null_constant_(nullptr),
- cached_int_constants_(std::less<int32_t>(), arena->Adapter(kArenaAllocConstantsMap)),
- cached_float_constants_(std::less<int32_t>(), arena->Adapter(kArenaAllocConstantsMap)),
- cached_long_constants_(std::less<int64_t>(), arena->Adapter(kArenaAllocConstantsMap)),
- cached_double_constants_(std::less<int64_t>(), arena->Adapter(kArenaAllocConstantsMap)),
+ cached_int_constants_(std::less<int32_t>(), allocator->Adapter(kArenaAllocConstantsMap)),
+ cached_float_constants_(std::less<int32_t>(), allocator->Adapter(kArenaAllocConstantsMap)),
+ cached_long_constants_(std::less<int64_t>(), allocator->Adapter(kArenaAllocConstantsMap)),
+ cached_double_constants_(std::less<int64_t>(), allocator->Adapter(kArenaAllocConstantsMap)),
cached_current_method_(nullptr),
art_method_(nullptr),
inexact_object_rti_(ReferenceTypeInfo::CreateInvalid()),
osr_(osr),
- cha_single_implementation_list_(arena->Adapter(kArenaAllocCHA)) {
+ cha_single_implementation_list_(allocator->Adapter(kArenaAllocCHA)) {
blocks_.reserve(kDefaultNumberOfBlocks);
}
// Acquires and stores RTI of inexact Object to be used when creating HNullConstant.
void InitializeInexactObjectRTI(VariableSizedHandleScope* handles);
- ArenaAllocator* GetArena() const { return arena_; }
+ ArenaAllocator* GetAllocator() const { return allocator_; }
+ ArenaStack* GetArenaStack() const { return arena_stack_; }
const ArenaVector<HBasicBlock*>& GetBlocks() const { return blocks_; }
bool IsInSsaForm() const { return in_ssa_form_; }
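HGraph now carries an ArenaStack alongside the ArenaAllocator and exposes it
through GetArenaStack(), presumably so passes can build scoped allocators on
top of it (optimizing_compiler.cc below starts including
base/scoped_arena_allocator.h). A hedged sketch of graph construction under
the new signature; the method index and instruction set are placeholders:

// Sketch: the allocator and arena stack are typically created from the same pool.
HGraph* CreateSketchGraph(ArenaAllocator* allocator,
                          ArenaStack* arena_stack,
                          const DexFile& dex_file) {
  return new (allocator) HGraph(
      allocator, arena_stack, dex_file, /* method_idx */ 0u, kRuntimeISA);
}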
@@ -613,7 +617,7 @@
// If not found or previously deleted, create and cache a new instruction.
// Don't bother reviving a previously deleted instruction, for simplicity.
if (constant == nullptr || constant->GetBlock() == nullptr) {
- constant = new (arena_) InstructionType(value, dex_pc);
+ constant = new (allocator_) InstructionType(value, dex_pc);
cache->Overwrite(value, constant);
InsertConstant(constant);
}
@@ -629,7 +633,8 @@
// See CacheFloatConstant comment.
void CacheDoubleConstant(HDoubleConstant* constant);
- ArenaAllocator* const arena_;
+ ArenaAllocator* const allocator_;
+ ArenaStack* const arena_stack_;
// List of blocks in insertion order.
ArenaVector<HBasicBlock*> blocks_;
@@ -751,9 +756,12 @@
suspend_check_(nullptr),
irreducible_(false),
contains_irreducible_loop_(false),
- back_edges_(graph->GetArena()->Adapter(kArenaAllocLoopInfoBackEdges)),
+ back_edges_(graph->GetAllocator()->Adapter(kArenaAllocLoopInfoBackEdges)),
// Make bit vector growable, as the number of blocks may change.
- blocks_(graph->GetArena(), graph->GetBlocks().size(), true, kArenaAllocLoopInfoBackEdges) {
+ blocks_(graph->GetAllocator(),
+ graph->GetBlocks().size(),
+ true,
+ kArenaAllocLoopInfoBackEdges) {
back_edges_.reserve(kDefaultNumberOfBackEdges);
}
@@ -916,11 +924,11 @@
public:
explicit HBasicBlock(HGraph* graph, uint32_t dex_pc = kNoDexPc)
: graph_(graph),
- predecessors_(graph->GetArena()->Adapter(kArenaAllocPredecessors)),
- successors_(graph->GetArena()->Adapter(kArenaAllocSuccessors)),
+ predecessors_(graph->GetAllocator()->Adapter(kArenaAllocPredecessors)),
+ successors_(graph->GetAllocator()->Adapter(kArenaAllocSuccessors)),
loop_information_(nullptr),
dominator_(nullptr),
- dominated_blocks_(graph->GetArena()->Adapter(kArenaAllocDominated)),
+ dominated_blocks_(graph->GetAllocator()->Adapter(kArenaAllocDominated)),
block_id_(kInvalidBlockId),
dex_pc_(dex_pc),
lifetime_start_(kNoLifetime),
@@ -972,7 +980,7 @@
void AddBackEdge(HBasicBlock* back_edge) {
if (loop_information_ == nullptr) {
- loop_information_ = new (graph_->GetArena()) HLoopInformation(this, graph_);
+ loop_information_ = new (graph_->GetAllocator()) HLoopInformation(this, graph_);
}
DCHECK_EQ(loop_information_->GetHeader(), this);
loop_information_->AddBackEdge(back_edge);
@@ -1792,21 +1800,23 @@
// A HEnvironment object contains the values of virtual registers at a given location.
class HEnvironment : public ArenaObject<kArenaAllocEnvironment> {
public:
- ALWAYS_INLINE HEnvironment(ArenaAllocator* arena,
+ ALWAYS_INLINE HEnvironment(ArenaAllocator* allocator,
size_t number_of_vregs,
ArtMethod* method,
uint32_t dex_pc,
HInstruction* holder)
- : vregs_(number_of_vregs, arena->Adapter(kArenaAllocEnvironmentVRegs)),
- locations_(arena->Adapter(kArenaAllocEnvironmentLocations)),
+ : vregs_(number_of_vregs, allocator->Adapter(kArenaAllocEnvironmentVRegs)),
+ locations_(allocator->Adapter(kArenaAllocEnvironmentLocations)),
parent_(nullptr),
method_(method),
dex_pc_(dex_pc),
holder_(holder) {
}
- ALWAYS_INLINE HEnvironment(ArenaAllocator* arena, const HEnvironment& to_copy, HInstruction* holder)
- : HEnvironment(arena,
+ ALWAYS_INLINE HEnvironment(ArenaAllocator* allocator,
+ const HEnvironment& to_copy,
+ HInstruction* holder)
+ : HEnvironment(allocator,
to_copy.Size(),
to_copy.GetMethod(),
to_copy.GetDexPc(),
@@ -1925,7 +1935,7 @@
HInstruction* GetPreviousDisregardingMoves() const;
HBasicBlock* GetBlock() const { return block_; }
- ArenaAllocator* GetArena() const { return block_->GetGraph()->GetArena(); }
+ ArenaAllocator* GetAllocator() const { return block_->GetGraph()->GetAllocator(); }
void SetBlock(HBasicBlock* block) { block_ = block; }
bool IsInBlock() const { return block_ != nullptr; }
bool IsInLoop() const { return block_->IsInLoop(); }
@@ -2015,7 +2025,7 @@
// Note: fixup_end remains valid across push_front().
auto fixup_end = uses_.empty() ? uses_.begin() : ++uses_.begin();
HUseListNode<HInstruction*>* new_node =
- new (GetBlock()->GetGraph()->GetArena()) HUseListNode<HInstruction*>(user, index);
+ new (GetBlock()->GetGraph()->GetAllocator()) HUseListNode<HInstruction*>(user, index);
uses_.push_front(*new_node);
FixUpUserRecordsAfterUseInsertion(fixup_end);
}
@@ -2025,7 +2035,7 @@
// Note: env_fixup_end remains valid across push_front().
auto env_fixup_end = env_uses_.empty() ? env_uses_.begin() : ++env_uses_.begin();
HUseListNode<HEnvironment*>* new_node =
- new (GetBlock()->GetGraph()->GetArena()) HUseListNode<HEnvironment*>(user, index);
+ new (GetBlock()->GetGraph()->GetAllocator()) HUseListNode<HEnvironment*>(user, index);
env_uses_.push_front(*new_node);
FixUpUserRecordsAfterEnvUseInsertion(env_fixup_end);
}
@@ -2108,7 +2118,7 @@
// copying, the uses lists are being updated.
void CopyEnvironmentFrom(HEnvironment* environment) {
DCHECK(environment_ == nullptr);
- ArenaAllocator* allocator = GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* allocator = GetBlock()->GetGraph()->GetAllocator();
environment_ = new (allocator) HEnvironment(allocator, *environment, this);
environment_->CopyFrom(environment);
if (environment->GetParent() != nullptr) {
@@ -2119,7 +2129,7 @@
void CopyEnvironmentFromWithLoopPhiAdjustment(HEnvironment* environment,
HBasicBlock* block) {
DCHECK(environment_ == nullptr);
- ArenaAllocator* allocator = GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* allocator = GetBlock()->GetGraph()->GetAllocator();
environment_ = new (allocator) HEnvironment(allocator, *environment, this);
environment_->CopyFromWithLoopPhiAdjustment(environment, block);
if (environment->GetParent() != nullptr) {
@@ -2467,11 +2477,11 @@
protected:
HVariableInputSizeInstruction(SideEffects side_effects,
uint32_t dex_pc,
- ArenaAllocator* arena,
+ ArenaAllocator* allocator,
size_t number_of_inputs,
ArenaAllocKind kind)
: HInstruction(side_effects, dex_pc),
- inputs_(number_of_inputs, arena->Adapter(kind)) {}
+ inputs_(number_of_inputs, allocator->Adapter(kind)) {}
ArenaVector<HUserRecord<HInstruction*>> inputs_;
@@ -2572,7 +2582,7 @@
class HPhi FINAL : public HVariableInputSizeInstruction {
public:
- HPhi(ArenaAllocator* arena,
+ HPhi(ArenaAllocator* allocator,
uint32_t reg_number,
size_t number_of_inputs,
DataType::Type type,
@@ -2580,7 +2590,7 @@
: HVariableInputSizeInstruction(
SideEffects::None(),
dex_pc,
- arena,
+ allocator,
number_of_inputs,
kArenaAllocPhiInputs),
reg_number_(reg_number) {
@@ -3019,11 +3029,14 @@
public:
// Use this constructor when the `HDeoptimize` acts as a barrier across which
// no code can move.
- HDeoptimize(ArenaAllocator* arena, HInstruction* cond, DeoptimizationKind kind, uint32_t dex_pc)
+ HDeoptimize(ArenaAllocator* allocator,
+ HInstruction* cond,
+ DeoptimizationKind kind,
+ uint32_t dex_pc)
: HVariableInputSizeInstruction(
SideEffects::All(),
dex_pc,
- arena,
+ allocator,
/* number_of_inputs */ 1,
kArenaAllocMisc) {
SetPackedFlag<kFieldCanBeMoved>(false);
@@ -3036,7 +3049,7 @@
// instead of `guard`.
// We set CanTriggerGC to prevent any intermediate address from being live
// at the point of the `HDeoptimize`.
- HDeoptimize(ArenaAllocator* arena,
+ HDeoptimize(ArenaAllocator* allocator,
HInstruction* cond,
HInstruction* guard,
DeoptimizationKind kind,
@@ -3044,7 +3057,7 @@
: HVariableInputSizeInstruction(
SideEffects::CanTriggerGC(),
dex_pc,
- arena,
+ allocator,
/* number_of_inputs */ 2,
kArenaAllocMisc) {
SetPackedFlag<kFieldCanBeMoved>(true);
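The two HDeoptimize constructors differ in their side effects and input count:
the barrier form claims SideEffects::All() and takes only the condition, while
the guarded form claims CanTriggerGC() and additionally takes the guarded
value. A sketch of the barrier form with the renamed parameter (the helper,
deoptimization kind, and dex pc are illustrative):

// Sketch: the allocator is both the placement target and the first argument.
HDeoptimize* MakeBarrierDeopt(HGraph* graph, HInstruction* cond, uint32_t dex_pc) {
  ArenaAllocator* allocator = graph->GetAllocator();
  return new (allocator) HDeoptimize(
      allocator, cond, DeoptimizationKind::kCHA, dex_pc);
}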
@@ -3108,8 +3121,8 @@
public:
// CHA guards are only optimized in a separate pass and have no side effects
// with regard to other passes.
- HShouldDeoptimizeFlag(ArenaAllocator* arena, uint32_t dex_pc)
- : HVariableInputSizeInstruction(SideEffects::None(), dex_pc, arena, 0, kArenaAllocCHA) {
+ HShouldDeoptimizeFlag(ArenaAllocator* allocator, uint32_t dex_pc)
+ : HVariableInputSizeInstruction(SideEffects::None(), dex_pc, allocator, 0, kArenaAllocCHA) {
}
DataType::Type GetType() const OVERRIDE { return DataType::Type::kInt32; }
@@ -4076,7 +4089,7 @@
using InvokeTypeField = BitField<InvokeType, kFieldInvokeType, kFieldInvokeTypeSize>;
using ReturnTypeField = BitField<DataType::Type, kFieldReturnType, kFieldReturnTypeSize>;
- HInvoke(ArenaAllocator* arena,
+ HInvoke(ArenaAllocator* allocator,
uint32_t number_of_arguments,
uint32_t number_of_other_inputs,
DataType::Type return_type,
@@ -4087,7 +4100,7 @@
: HVariableInputSizeInstruction(
SideEffects::AllExceptGCDependency(), // Assume write/read on all fields/arrays.
dex_pc,
- arena,
+ allocator,
number_of_arguments + number_of_other_inputs,
kArenaAllocInvokeInputs),
number_of_arguments_(number_of_arguments),
@@ -4114,13 +4127,13 @@
class HInvokeUnresolved FINAL : public HInvoke {
public:
- HInvokeUnresolved(ArenaAllocator* arena,
+ HInvokeUnresolved(ArenaAllocator* allocator,
uint32_t number_of_arguments,
DataType::Type return_type,
uint32_t dex_pc,
uint32_t dex_method_index,
InvokeType invoke_type)
- : HInvoke(arena,
+ : HInvoke(allocator,
number_of_arguments,
0u /* number_of_other_inputs */,
return_type,
@@ -4138,12 +4151,12 @@
class HInvokePolymorphic FINAL : public HInvoke {
public:
- HInvokePolymorphic(ArenaAllocator* arena,
+ HInvokePolymorphic(ArenaAllocator* allocator,
uint32_t number_of_arguments,
DataType::Type return_type,
uint32_t dex_pc,
uint32_t dex_method_index)
- : HInvoke(arena,
+ : HInvoke(allocator,
number_of_arguments,
0u /* number_of_other_inputs */,
return_type,
@@ -4215,7 +4228,7 @@
uint64_t method_load_data;
};
- HInvokeStaticOrDirect(ArenaAllocator* arena,
+ HInvokeStaticOrDirect(ArenaAllocator* allocator,
uint32_t number_of_arguments,
DataType::Type return_type,
uint32_t dex_pc,
@@ -4225,7 +4238,7 @@
InvokeType invoke_type,
MethodReference target_method,
ClinitCheckRequirement clinit_check_requirement)
- : HInvoke(arena,
+ : HInvoke(allocator,
number_of_arguments,
// There is potentially one extra argument for the HCurrentMethod node, and
// potentially one other if the clinit check is explicit, and potentially
@@ -4410,14 +4423,14 @@
class HInvokeVirtual FINAL : public HInvoke {
public:
- HInvokeVirtual(ArenaAllocator* arena,
+ HInvokeVirtual(ArenaAllocator* allocator,
uint32_t number_of_arguments,
DataType::Type return_type,
uint32_t dex_pc,
uint32_t dex_method_index,
ArtMethod* resolved_method,
uint32_t vtable_index)
- : HInvoke(arena,
+ : HInvoke(allocator,
number_of_arguments,
0u,
return_type,
@@ -4458,14 +4471,14 @@
class HInvokeInterface FINAL : public HInvoke {
public:
- HInvokeInterface(ArenaAllocator* arena,
+ HInvokeInterface(ArenaAllocator* allocator,
uint32_t number_of_arguments,
DataType::Type return_type,
uint32_t dex_pc,
uint32_t dex_method_index,
ArtMethod* resolved_method,
uint32_t imt_index)
- : HInvoke(arena,
+ : HInvoke(allocator,
number_of_arguments,
0u,
return_type,
@@ -6637,7 +6650,7 @@
// about the associated object.
HConstructorFence(HInstruction* fence_object,
uint32_t dex_pc,
- ArenaAllocator* arena)
+ ArenaAllocator* allocator)
// We strongly suspect there is not a more accurate way to describe the fine-grained reordering
// constraints described in the class header. We claim that these SideEffects constraints
// enforce a superset of the real constraints.
@@ -6661,7 +6674,7 @@
// we can refine the side effect to a smaller set of type reads (see above constraints).
: HVariableInputSizeInstruction(SideEffects::AllReads(),
dex_pc,
- arena,
+ allocator,
/* number_of_inputs */ 1,
kArenaAllocConstructorFenceInputs) {
DCHECK(fence_object != nullptr);
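HConstructorFence takes the fenced object as its single input plus the
allocator, and deliberately claims SideEffects::AllReads() as a conservative
superset of the real reordering constraints. A sketch of the renamed
constructor in use (the helper name is hypothetical):

// Sketch: fencing a freshly constructed object.
HConstructorFence* MakeFence(HGraph* graph,
                             HInstruction* new_instance,
                             uint32_t dex_pc) {
  ArenaAllocator* allocator = graph->GetAllocator();
  return new (allocator) HConstructorFence(new_instance, dex_pc, allocator);
}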
@@ -6878,9 +6891,9 @@
class HParallelMove FINAL : public HTemplateInstruction<0> {
public:
- explicit HParallelMove(ArenaAllocator* arena, uint32_t dex_pc = kNoDexPc)
+ explicit HParallelMove(ArenaAllocator* allocator, uint32_t dex_pc = kNoDexPc)
: HTemplateInstruction(SideEffects::None(), dex_pc),
- moves_(arena->Adapter(kArenaAllocMoveOperands)) {
+ moves_(allocator->Adapter(kArenaAllocMoveOperands)) {
moves_.reserve(kDefaultNumberOfMoves);
}
diff --git a/compiler/optimizing/nodes_test.cc b/compiler/optimizing/nodes_test.cc
index ada6177..b2180d9 100644
--- a/compiler/optimizing/nodes_test.cc
+++ b/compiler/optimizing/nodes_test.cc
@@ -23,37 +23,36 @@
namespace art {
+class NodeTest : public OptimizingUnitTest {};
+
/**
 * Test that removing an instruction from the graph removes it from user lists
 * and environment lists.
 */
-TEST(Node, RemoveInstruction) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+TEST_F(NodeTest, RemoveInstruction) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
entry->AddInstruction(parameter);
- entry->AddInstruction(new (&allocator) HGoto());
+ entry->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* first_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* first_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(first_block);
entry->AddSuccessor(first_block);
- HInstruction* null_check = new (&allocator) HNullCheck(parameter, 0);
+ HInstruction* null_check = new (GetAllocator()) HNullCheck(parameter, 0);
first_block->AddInstruction(null_check);
- first_block->AddInstruction(new (&allocator) HReturnVoid());
+ first_block->AddInstruction(new (GetAllocator()) HReturnVoid());
- HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(exit_block);
first_block->AddSuccessor(exit_block);
- exit_block->AddInstruction(new (&allocator) HExit());
+ exit_block->AddInstruction(new (GetAllocator()) HExit());
- HEnvironment* environment = new (&allocator) HEnvironment(
- &allocator, 1, graph->GetArtMethod(), 0, null_check);
+ HEnvironment* environment = new (GetAllocator()) HEnvironment(
+ GetAllocator(), 1, graph->GetArtMethod(), 0, null_check);
null_check->SetRawEnvironment(environment);
environment->SetRawEnvAt(0, parameter);
parameter->AddEnvUseAt(null_check->GetEnvironment(), 0);
@@ -70,25 +69,22 @@
/**
* Test that inserting an instruction in the graph updates user lists.
*/
-TEST(Node, InsertInstruction) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+TEST_F(NodeTest, InsertInstruction) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator) HParameterValue(
+ HInstruction* parameter1 = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
- HInstruction* parameter2 = new (&allocator) HParameterValue(
+ HInstruction* parameter2 = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
- entry->AddInstruction(new (&allocator) HExit());
+ entry->AddInstruction(new (GetAllocator()) HExit());
ASSERT_FALSE(parameter1->HasUses());
- HInstruction* to_insert = new (&allocator) HNullCheck(parameter1, 0);
+ HInstruction* to_insert = new (GetAllocator()) HNullCheck(parameter1, 0);
entry->InsertInstructionBefore(to_insert, parameter2);
ASSERT_TRUE(parameter1->HasUses());
@@ -98,48 +94,42 @@
/**
* Test that adding an instruction in the graph updates user lists.
*/
-TEST(Node, AddInstruction) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+TEST_F(NodeTest, AddInstruction) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
entry->AddInstruction(parameter);
ASSERT_FALSE(parameter->HasUses());
- HInstruction* to_add = new (&allocator) HNullCheck(parameter, 0);
+ HInstruction* to_add = new (GetAllocator()) HNullCheck(parameter, 0);
entry->AddInstruction(to_add);
ASSERT_TRUE(parameter->HasUses());
ASSERT_TRUE(parameter->GetUses().HasExactlyOneElement());
}
-TEST(Node, ParentEnvironment) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+TEST_F(NodeTest, ParentEnvironment) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator) HParameterValue(
+ HInstruction* parameter1 = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
- HInstruction* with_environment = new (&allocator) HNullCheck(parameter1, 0);
+ HInstruction* with_environment = new (GetAllocator()) HNullCheck(parameter1, 0);
entry->AddInstruction(parameter1);
entry->AddInstruction(with_environment);
- entry->AddInstruction(new (&allocator) HExit());
+ entry->AddInstruction(new (GetAllocator()) HExit());
ASSERT_TRUE(parameter1->HasUses());
ASSERT_TRUE(parameter1->GetUses().HasExactlyOneElement());
- HEnvironment* environment = new (&allocator) HEnvironment(
- &allocator, 1, graph->GetArtMethod(), 0, with_environment);
- ArenaVector<HInstruction*> array(allocator.Adapter());
+ HEnvironment* environment = new (GetAllocator()) HEnvironment(
+ GetAllocator(), 1, graph->GetArtMethod(), 0, with_environment);
+ ArenaVector<HInstruction*> array(GetAllocator()->Adapter());
array.push_back(parameter1);
environment->CopyFrom(array);
@@ -148,22 +138,22 @@
ASSERT_TRUE(parameter1->HasEnvironmentUses());
ASSERT_TRUE(parameter1->GetEnvUses().HasExactlyOneElement());
- HEnvironment* parent1 = new (&allocator) HEnvironment(
- &allocator, 1, graph->GetArtMethod(), 0, nullptr);
+ HEnvironment* parent1 = new (GetAllocator()) HEnvironment(
+ GetAllocator(), 1, graph->GetArtMethod(), 0, nullptr);
parent1->CopyFrom(array);
ASSERT_EQ(parameter1->GetEnvUses().SizeSlow(), 2u);
- HEnvironment* parent2 = new (&allocator) HEnvironment(
- &allocator, 1, graph->GetArtMethod(), 0, nullptr);
+ HEnvironment* parent2 = new (GetAllocator()) HEnvironment(
+ GetAllocator(), 1, graph->GetArtMethod(), 0, nullptr);
parent2->CopyFrom(array);
- parent1->SetAndCopyParentChain(&allocator, parent2);
+ parent1->SetAndCopyParentChain(GetAllocator(), parent2);
// One use for parent2, and one other use for the new parent of parent1.
ASSERT_EQ(parameter1->GetEnvUses().SizeSlow(), 4u);
// We have copied the parent chain. So we now have two more uses.
- environment->SetAndCopyParentChain(&allocator, parent1);
+ environment->SetAndCopyParentChain(GetAllocator(), parent1);
ASSERT_EQ(parameter1->GetEnvUses().SizeSlow(), 6u);
}
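Every test in this file migrates the same way: the per-test
ArenaPool/ArenaAllocator pair disappears, the suite becomes a fixture deriving
from OptimizingUnitTest, and both the graph and the allocator come from the
fixture. The converted shape, as a sketch with hypothetical fixture and test
names:

class SketchTest : public OptimizingUnitTest {};

TEST_F(SketchTest, BuildsEntryBlock) {
  HGraph* graph = CreateGraph();  // fixture-provided; no explicit allocator
  HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
  graph->AddBlock(entry);
  graph->SetEntryBlock(entry);
  entry->AddInstruction(new (GetAllocator()) HExit());
  ASSERT_EQ(graph->GetEntryBlock(), entry);
}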
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index 8f3ab11..781a59f 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -71,7 +71,7 @@
// TODO: we could introduce SIMD types in HIR.
static constexpr DataType::Type kSIMDType = DataType::Type::kFloat64;
- HVecOperation(ArenaAllocator* arena,
+ HVecOperation(ArenaAllocator* allocator,
DataType::Type packed_type,
SideEffects side_effects,
size_t number_of_inputs,
@@ -79,7 +79,7 @@
uint32_t dex_pc)
: HVariableInputSizeInstruction(side_effects,
dex_pc,
- arena,
+ allocator,
number_of_inputs,
kArenaAllocVectorNode),
vector_length_(vector_length) {
@@ -156,12 +156,12 @@
// Abstraction of a unary vector operation.
class HVecUnaryOperation : public HVecOperation {
public:
- HVecUnaryOperation(ArenaAllocator* arena,
+ HVecUnaryOperation(ArenaAllocator* allocator,
HInstruction* input,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecOperation(arena,
+ : HVecOperation(allocator,
packed_type,
SideEffects::None(),
/* number_of_inputs */ 1,
@@ -181,13 +181,13 @@
// Abstraction of a binary vector operation.
class HVecBinaryOperation : public HVecOperation {
public:
- HVecBinaryOperation(ArenaAllocator* arena,
+ HVecBinaryOperation(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecOperation(arena,
+ : HVecOperation(allocator,
packed_type,
SideEffects::None(),
/* number_of_inputs */ 2,
@@ -210,13 +210,18 @@
// The Android runtime guarantees elements have at least natural alignment.
class HVecMemoryOperation : public HVecOperation {
public:
- HVecMemoryOperation(ArenaAllocator* arena,
+ HVecMemoryOperation(ArenaAllocator* allocator,
DataType::Type packed_type,
SideEffects side_effects,
size_t number_of_inputs,
size_t vector_length,
uint32_t dex_pc)
- : HVecOperation(arena, packed_type, side_effects, number_of_inputs, vector_length, dex_pc),
+ : HVecOperation(allocator,
+ packed_type,
+ side_effects,
+ number_of_inputs,
+ vector_length,
+ dex_pc),
alignment_(DataType::Size(packed_type), 0) {
DCHECK_GE(number_of_inputs, 2u);
}
@@ -260,12 +265,12 @@
// viz. replicate(x) = [ x, .. , x ].
class HVecReplicateScalar FINAL : public HVecUnaryOperation {
public:
- HVecReplicateScalar(ArenaAllocator* arena,
+ HVecReplicateScalar(ArenaAllocator* allocator,
HInstruction* scalar,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecUnaryOperation(arena, scalar, packed_type, vector_length, dex_pc) {
+ : HVecUnaryOperation(allocator, scalar, packed_type, vector_length, dex_pc) {
DCHECK(!scalar->IsVecOperation());
}
@@ -285,13 +290,13 @@
// TODO: for now, only the i == 1 case is supported.
class HVecExtractScalar FINAL : public HVecUnaryOperation {
public:
- HVecExtractScalar(ArenaAllocator* arena,
+ HVecExtractScalar(ArenaAllocator* allocator,
HInstruction* input,
DataType::Type packed_type,
size_t vector_length,
size_t index,
uint32_t dex_pc)
- : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) {
+ : HVecUnaryOperation(allocator, input, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(input, packed_type));
DCHECK_LT(index, vector_length);
DCHECK_EQ(index, 0u);
@@ -323,13 +328,13 @@
kMax = 3
};
- HVecReduce(ArenaAllocator* arena,
+ HVecReduce(ArenaAllocator* allocator,
HInstruction* input,
DataType::Type packed_type,
size_t vector_length,
ReductionKind kind,
uint32_t dex_pc)
- : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc),
+ : HVecUnaryOperation(allocator, input, packed_type, vector_length, dex_pc),
kind_(kind) {
DCHECK(HasConsistentPackedTypes(input, packed_type));
}
@@ -356,12 +361,12 @@
// viz. cnv[ x1, .. , xn ] = [ cnv(x1), .. , cnv(xn) ].
class HVecCnv FINAL : public HVecUnaryOperation {
public:
- HVecCnv(ArenaAllocator* arena,
+ HVecCnv(ArenaAllocator* allocator,
HInstruction* input,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) {
+ : HVecUnaryOperation(allocator, input, packed_type, vector_length, dex_pc) {
DCHECK(input->IsVecOperation());
DCHECK_NE(GetInputType(), GetResultType()); // actual convert
}
@@ -381,12 +386,12 @@
// viz. neg[ x1, .. , xn ] = [ -x1, .. , -xn ].
class HVecNeg FINAL : public HVecUnaryOperation {
public:
- HVecNeg(ArenaAllocator* arena,
+ HVecNeg(ArenaAllocator* allocator,
HInstruction* input,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) {
+ : HVecUnaryOperation(allocator, input, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(input, packed_type));
}
@@ -403,12 +408,12 @@
// for signed operand x.
class HVecAbs FINAL : public HVecUnaryOperation {
public:
- HVecAbs(ArenaAllocator* arena,
+ HVecAbs(ArenaAllocator* allocator,
HInstruction* input,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) {
+ : HVecUnaryOperation(allocator, input, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(input, packed_type));
}
@@ -425,12 +430,12 @@
// not[ x1, .. , xn ] = [ !x1, .. , !xn ] for boolean.
class HVecNot FINAL : public HVecUnaryOperation {
public:
- HVecNot(ArenaAllocator* arena,
+ HVecNot(ArenaAllocator* allocator,
HInstruction* input,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) {
+ : HVecUnaryOperation(allocator, input, packed_type, vector_length, dex_pc) {
DCHECK(input->IsVecOperation());
}
@@ -450,13 +455,13 @@
// viz. [ x1, .. , xn ] + [ y1, .. , yn ] = [ x1 + y1, .. , xn + yn ].
class HVecAdd FINAL : public HVecBinaryOperation {
public:
- HVecAdd(ArenaAllocator* arena,
+ HVecAdd(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
@@ -475,7 +480,7 @@
// for either both signed or both unsigned operands x, y.
class HVecHalvingAdd FINAL : public HVecBinaryOperation {
public:
- HVecHalvingAdd(ArenaAllocator* arena,
+ HVecHalvingAdd(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
@@ -483,7 +488,7 @@
bool is_rounded,
bool is_unsigned,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
// The `is_unsigned` flag should be used exclusively with the Int32 or Int64 types.
// This flag is a temporary measure while we do not have the Uint32 and Uint64 data types.
DCHECK(!is_unsigned ||
@@ -524,13 +529,13 @@
// viz. [ x1, .. , xn ] - [ y1, .. , yn ] = [ x1 - y1, .. , xn - yn ].
class HVecSub FINAL : public HVecBinaryOperation {
public:
- HVecSub(ArenaAllocator* arena,
+ HVecSub(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
@@ -547,13 +552,13 @@
// viz. [ x1, .. , xn ] * [ y1, .. , yn ] = [ x1 * y1, .. , xn * yn ].
class HVecMul FINAL : public HVecBinaryOperation {
public:
- HVecMul(ArenaAllocator* arena,
+ HVecMul(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
@@ -570,13 +575,13 @@
// viz. [ x1, .. , xn ] / [ y1, .. , yn ] = [ x1 / y1, .. , xn / yn ].
class HVecDiv FINAL : public HVecBinaryOperation {
public:
- HVecDiv(ArenaAllocator* arena,
+ HVecDiv(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
@@ -594,14 +599,14 @@
// for either both signed or both unsigned operands x, y.
class HVecMin FINAL : public HVecBinaryOperation {
public:
- HVecMin(ArenaAllocator* arena,
+ HVecMin(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
bool is_unsigned,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
// The `is_unsigned` flag should be used exclusively with the Int32 or Int64 types.
// This flag is a temporary measure while we do not have the Uint32 and Uint64 data types.
DCHECK(!is_unsigned ||
@@ -638,14 +643,14 @@
// for either both signed or both unsigned operands x, y.
class HVecMax FINAL : public HVecBinaryOperation {
public:
- HVecMax(ArenaAllocator* arena,
+ HVecMax(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
bool is_unsigned,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
// The `is_unsigned` flag should be used exclusively with the Int32 or Int64 types.
// This flag is a temporary measure while we do not have the Uint32 and Uint64 data types.
DCHECK(!is_unsigned ||
@@ -681,13 +686,13 @@
// viz. [ x1, .. , xn ] & [ y1, .. , yn ] = [ x1 & y1, .. , xn & yn ].
class HVecAnd FINAL : public HVecBinaryOperation {
public:
- HVecAnd(ArenaAllocator* arena,
+ HVecAnd(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(left->IsVecOperation() && right->IsVecOperation());
}
@@ -703,13 +708,13 @@
// viz. [ x1, .. , xn ] and-not [ y1, .. , yn ] = [ ~x1 & y1, .. , ~xn & yn ].
class HVecAndNot FINAL : public HVecBinaryOperation {
public:
- HVecAndNot(ArenaAllocator* arena,
+ HVecAndNot(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(left->IsVecOperation() && right->IsVecOperation());
}
@@ -725,13 +730,13 @@
// viz. [ x1, .. , xn ] | [ y1, .. , yn ] = [ x1 | y1, .. , xn | yn ].
class HVecOr FINAL : public HVecBinaryOperation {
public:
- HVecOr(ArenaAllocator* arena,
+ HVecOr(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(left->IsVecOperation() && right->IsVecOperation());
}
@@ -747,13 +752,13 @@
// viz. [ x1, .. , xn ] ^ [ y1, .. , yn ] = [ x1 ^ y1, .. , xn ^ yn ].
class HVecXor FINAL : public HVecBinaryOperation {
public:
- HVecXor(ArenaAllocator* arena,
+ HVecXor(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(left->IsVecOperation() && right->IsVecOperation());
}
@@ -769,13 +774,13 @@
// viz. [ x1, .. , xn ] << d = [ x1 << d, .. , xn << d ].
class HVecShl FINAL : public HVecBinaryOperation {
public:
- HVecShl(ArenaAllocator* arena,
+ HVecShl(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
}
@@ -791,13 +796,13 @@
// viz. [ x1, .. , xn ] >> d = [ x1 >> d, .. , xn >> d ].
class HVecShr FINAL : public HVecBinaryOperation {
public:
- HVecShr(ArenaAllocator* arena,
+ HVecShr(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
}
@@ -813,13 +818,13 @@
// viz. [ x1, .. , xn ] >>> d = [ x1 >>> d, .. , xn >>> d ].
class HVecUShr FINAL : public HVecBinaryOperation {
public:
- HVecUShr(ArenaAllocator* arena,
+ HVecUShr(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
}
@@ -840,13 +845,13 @@
// set( array(x1, .. , xm) ) = [ x1, .. , xm, 0, .. , 0 ] if m < n.
class HVecSetScalars FINAL : public HVecOperation {
public:
- HVecSetScalars(ArenaAllocator* arena,
+ HVecSetScalars(ArenaAllocator* allocator,
HInstruction* scalars[],
DataType::Type packed_type,
size_t vector_length,
size_t number_of_scalars,
uint32_t dex_pc)
- : HVecOperation(arena,
+ : HVecOperation(allocator,
packed_type,
SideEffects::None(),
number_of_scalars,
@@ -872,7 +877,7 @@
// viz. [ a1, .. , an ] + [ x1, .. , xn ] * [ y1, .. , yn ] = [ a1 + x1 * y1, .. , an + xn * yn ].
class HVecMultiplyAccumulate FINAL : public HVecOperation {
public:
- HVecMultiplyAccumulate(ArenaAllocator* arena,
+ HVecMultiplyAccumulate(ArenaAllocator* allocator,
InstructionKind op,
HInstruction* accumulator,
HInstruction* mul_left,
@@ -880,7 +885,7 @@
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecOperation(arena,
+ : HVecOperation(allocator,
packed_type,
SideEffects::None(),
/* number_of_inputs */ 3,
@@ -922,14 +927,14 @@
// for m <= n, non-overlapping sums, and signed operands x, y.
class HVecSADAccumulate FINAL : public HVecOperation {
public:
- HVecSADAccumulate(ArenaAllocator* arena,
+ HVecSADAccumulate(ArenaAllocator* allocator,
HInstruction* accumulator,
HInstruction* sad_left,
HInstruction* sad_right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecOperation(arena,
+ : HVecOperation(allocator,
packed_type,
SideEffects::None(),
/* number_of_inputs */ 3,
@@ -955,7 +960,7 @@
// yield the vector [ mem(1), .. , mem(n) ].
class HVecLoad FINAL : public HVecMemoryOperation {
public:
- HVecLoad(ArenaAllocator* arena,
+ HVecLoad(ArenaAllocator* allocator,
HInstruction* base,
HInstruction* index,
DataType::Type packed_type,
@@ -963,7 +968,7 @@
size_t vector_length,
bool is_string_char_at,
uint32_t dex_pc)
- : HVecMemoryOperation(arena,
+ : HVecMemoryOperation(allocator,
packed_type,
side_effects,
/* number_of_inputs */ 2,
@@ -999,7 +1004,7 @@
// sets mem(1) = x1, .. , mem(n) = xn.
class HVecStore FINAL : public HVecMemoryOperation {
public:
- HVecStore(ArenaAllocator* arena,
+ HVecStore(ArenaAllocator* allocator,
HInstruction* base,
HInstruction* index,
HInstruction* value,
@@ -1007,7 +1012,7 @@
SideEffects side_effects,
size_t vector_length,
uint32_t dex_pc)
- : HVecMemoryOperation(arena,
+ : HVecMemoryOperation(allocator,
packed_type,
side_effects,
/* number_of_inputs */ 3,
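All vector node constructors keep their shapes; only the `arena` parameter is
renamed to `allocator`. A sketch of building a packed add under the new names;
per the DCHECKs above, both operands must already be vector operations with a
consistent packed type (the helper name is illustrative):

// Sketch: a 4 x i32 vector add allocated on the graph's arena.
HVecAdd* MakeVecAdd(HGraph* graph, HInstruction* left, HInstruction* right) {
  ArenaAllocator* allocator = graph->GetAllocator();
  return new (allocator) HVecAdd(allocator, left, right,
                                 DataType::Type::kInt32,
                                 /* vector_length */ 4,
                                 kNoDexPc);
}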
diff --git a/compiler/optimizing/nodes_vector_test.cc b/compiler/optimizing/nodes_vector_test.cc
index d3a499c..ab9d759 100644
--- a/compiler/optimizing/nodes_vector_test.cc
+++ b/compiler/optimizing/nodes_vector_test.cc
@@ -23,12 +23,10 @@
/**
* Fixture class for testing vector nodes.
*/
-class NodesVectorTest : public CommonCompilerTest {
+class NodesVectorTest : public OptimizingUnitTest {
public:
NodesVectorTest()
- : pool_(),
- allocator_(&pool_),
- graph_(CreateGraph(&allocator_)) {
+ : graph_(CreateGraph()) {
BuildGraph();
}
@@ -36,32 +34,30 @@
void BuildGraph() {
graph_->SetNumberOfVRegs(1);
- entry_block_ = new (&allocator_) HBasicBlock(graph_);
- exit_block_ = new (&allocator_) HBasicBlock(graph_);
+ entry_block_ = new (GetAllocator()) HBasicBlock(graph_);
+ exit_block_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry_block_);
graph_->AddBlock(exit_block_);
graph_->SetEntryBlock(entry_block_);
graph_->SetExitBlock(exit_block_);
- int8_parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(1),
- 0,
- DataType::Type::kInt8);
+ int8_parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(1),
+ 0,
+ DataType::Type::kInt8);
entry_block_->AddInstruction(int8_parameter_);
- int16_parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(2),
- 0,
- DataType::Type::kInt16);
+ int16_parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(2),
+ 0,
+ DataType::Type::kInt16);
entry_block_->AddInstruction(int16_parameter_);
- int32_parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kInt32);
+ int32_parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kInt32);
entry_block_->AddInstruction(int32_parameter_);
}
// General building fields.
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
HBasicBlock* entry_block_;
@@ -134,16 +130,16 @@
}
TEST_F(NodesVectorTest, VectorOperationProperties) {
- HVecOperation* v0 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
- HVecOperation* v1 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
- HVecOperation* v2 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 2, kNoDexPc);
- HVecOperation* v3 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
- HVecOperation* v4 = new (&allocator_) HVecStore(
- &allocator_,
+ HVecOperation* v0 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecOperation* v1 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecOperation* v2 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 2, kNoDexPc);
+ HVecOperation* v3 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
+ HVecOperation* v4 = new (GetAllocator()) HVecStore(
+ GetAllocator(),
int32_parameter_,
int32_parameter_,
v0,
@@ -198,30 +194,30 @@
}
TEST_F(NodesVectorTest, VectorAlignmentAndStringCharAtMatterOnLoad) {
- HVecLoad* v0 = new (&allocator_) HVecLoad(&allocator_,
- int32_parameter_,
- int32_parameter_,
- DataType::Type::kInt32,
- SideEffects::ArrayReadOfType(DataType::Type::kInt32),
- 4,
- /*is_string_char_at*/ false,
- kNoDexPc);
- HVecLoad* v1 = new (&allocator_) HVecLoad(&allocator_,
- int32_parameter_,
- int32_parameter_,
- DataType::Type::kInt32,
- SideEffects::ArrayReadOfType(DataType::Type::kInt32),
- 4,
- /*is_string_char_at*/ false,
- kNoDexPc);
- HVecLoad* v2 = new (&allocator_) HVecLoad(&allocator_,
- int32_parameter_,
- int32_parameter_,
- DataType::Type::kInt32,
- SideEffects::ArrayReadOfType(DataType::Type::kInt32),
- 4,
- /*is_string_char_at*/ true,
- kNoDexPc);
+ HVecLoad* v0 = new (GetAllocator()) HVecLoad(GetAllocator(),
+ int32_parameter_,
+ int32_parameter_,
+ DataType::Type::kInt32,
+ SideEffects::ArrayReadOfType(DataType::Type::kInt32),
+ 4,
+ /*is_string_char_at*/ false,
+ kNoDexPc);
+ HVecLoad* v1 = new (GetAllocator()) HVecLoad(GetAllocator(),
+ int32_parameter_,
+ int32_parameter_,
+ DataType::Type::kInt32,
+ SideEffects::ArrayReadOfType(DataType::Type::kInt32),
+ 4,
+ /*is_string_char_at*/ false,
+ kNoDexPc);
+ HVecLoad* v2 = new (GetAllocator()) HVecLoad(GetAllocator(),
+ int32_parameter_,
+ int32_parameter_,
+ DataType::Type::kInt32,
+ SideEffects::ArrayReadOfType(DataType::Type::kInt32),
+ 4,
+ /*is_string_char_at*/ true,
+ kNoDexPc);
EXPECT_TRUE(v0->CanBeMoved());
EXPECT_TRUE(v1->CanBeMoved());
@@ -250,10 +246,10 @@
}
TEST_F(NodesVectorTest, VectorAlignmentMattersOnStore) {
- HVecOperation* p0 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
- HVecStore* v0 = new (&allocator_) HVecStore(
- &allocator_,
+ HVecOperation* p0 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecStore* v0 = new (GetAllocator()) HVecStore(
+ GetAllocator(),
int32_parameter_,
int32_parameter_,
p0,
@@ -261,8 +257,8 @@
SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
4,
kNoDexPc);
- HVecStore* v1 = new (&allocator_) HVecStore(
- &allocator_,
+ HVecStore* v1 = new (GetAllocator()) HVecStore(
+ GetAllocator(),
int32_parameter_,
int32_parameter_,
p0,
@@ -287,27 +283,27 @@
}
TEST_F(NodesVectorTest, VectorSignMattersOnMin) {
- HVecOperation* p0 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
- HVecOperation* p1 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
- HVecOperation* p2 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
+ HVecOperation* p0 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecOperation* p1 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
+ HVecOperation* p2 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
- HVecMin* v0 = new (&allocator_) HVecMin(
- &allocator_, p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, kNoDexPc);
- HVecMin* v1 = new (&allocator_) HVecMin(
- &allocator_, p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* v2 = new (&allocator_) HVecMin(
- &allocator_, p0, p0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, kNoDexPc);
- HVecMin* v3 = new (&allocator_) HVecMin(
- &allocator_, p1, p1, DataType::Type::kUint8, 16, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* v4 = new (&allocator_) HVecMin(
- &allocator_, p1, p1, DataType::Type::kInt8, 16, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* v5 = new (&allocator_) HVecMin(
- &allocator_, p2, p2, DataType::Type::kUint16, 8, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* v6 = new (&allocator_) HVecMin(
- &allocator_, p2, p2, DataType::Type::kInt16, 8, /*is_unsigned*/ false, kNoDexPc);
+ HVecMin* v0 = new (GetAllocator()) HVecMin(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, kNoDexPc);
+ HVecMin* v1 = new (GetAllocator()) HVecMin(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, kNoDexPc);
+ HVecMin* v2 = new (GetAllocator()) HVecMin(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, kNoDexPc);
+ HVecMin* v3 = new (GetAllocator()) HVecMin(
+ GetAllocator(), p1, p1, DataType::Type::kUint8, 16, /*is_unsigned*/ false, kNoDexPc);
+ HVecMin* v4 = new (GetAllocator()) HVecMin(
+ GetAllocator(), p1, p1, DataType::Type::kInt8, 16, /*is_unsigned*/ false, kNoDexPc);
+ HVecMin* v5 = new (GetAllocator()) HVecMin(
+ GetAllocator(), p2, p2, DataType::Type::kUint16, 8, /*is_unsigned*/ false, kNoDexPc);
+ HVecMin* v6 = new (GetAllocator()) HVecMin(
+ GetAllocator(), p2, p2, DataType::Type::kInt16, 8, /*is_unsigned*/ false, kNoDexPc);
HVecMin* min_insns[] = { v0, v1, v2, v3, v4, v5, v6 };
EXPECT_FALSE(p0->CanBeMoved());
@@ -331,27 +327,27 @@
}
TEST_F(NodesVectorTest, VectorSignMattersOnMax) {
- HVecOperation* p0 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
- HVecOperation* p1 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
- HVecOperation* p2 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
+ HVecOperation* p0 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecOperation* p1 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
+ HVecOperation* p2 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
- HVecMax* v0 = new (&allocator_) HVecMax(
- &allocator_, p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, kNoDexPc);
- HVecMax* v1 = new (&allocator_) HVecMax(
- &allocator_, p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* v2 = new (&allocator_) HVecMax(
- &allocator_, p0, p0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, kNoDexPc);
- HVecMax* v3 = new (&allocator_) HVecMax(
- &allocator_, p1, p1, DataType::Type::kUint8, 16, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* v4 = new (&allocator_) HVecMax(
- &allocator_, p1, p1, DataType::Type::kInt8, 16, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* v5 = new (&allocator_) HVecMax(
- &allocator_, p2, p2, DataType::Type::kUint16, 8, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* v6 = new (&allocator_) HVecMax(
- &allocator_, p2, p2, DataType::Type::kInt16, 8, /*is_unsigned*/ false, kNoDexPc);
+ HVecMax* v0 = new (GetAllocator()) HVecMax(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, kNoDexPc);
+ HVecMax* v1 = new (GetAllocator()) HVecMax(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, kNoDexPc);
+ HVecMax* v2 = new (GetAllocator()) HVecMax(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, kNoDexPc);
+ HVecMax* v3 = new (GetAllocator()) HVecMax(
+ GetAllocator(), p1, p1, DataType::Type::kUint8, 16, /*is_unsigned*/ false, kNoDexPc);
+ HVecMax* v4 = new (GetAllocator()) HVecMax(
+ GetAllocator(), p1, p1, DataType::Type::kInt8, 16, /*is_unsigned*/ false, kNoDexPc);
+ HVecMax* v5 = new (GetAllocator()) HVecMax(
+ GetAllocator(), p2, p2, DataType::Type::kUint16, 8, /*is_unsigned*/ false, kNoDexPc);
+ HVecMax* v6 = new (GetAllocator()) HVecMax(
+ GetAllocator(), p2, p2, DataType::Type::kInt16, 8, /*is_unsigned*/ false, kNoDexPc);
HVecMax* max_insns[] = { v0, v1, v2, v3, v4, v5, v6 };
EXPECT_FALSE(p0->CanBeMoved());
@@ -375,51 +371,51 @@
}
TEST_F(NodesVectorTest, VectorAttributesMatterOnHalvingAdd) {
- HVecOperation* p0 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
- HVecOperation* p1 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
- HVecOperation* p2 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
+ HVecOperation* p0 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecOperation* p1 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
+ HVecOperation* p2 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
- HVecHalvingAdd* v0 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p0, p0, DataType::Type::kInt32, 4,
+ HVecHalvingAdd* v0 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
/*is_rounded*/ true, /*is_unsigned*/ true, kNoDexPc);
- HVecHalvingAdd* v1 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p0, p0, DataType::Type::kInt32, 4,
+ HVecHalvingAdd* v1 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
/*is_rounded*/ false, /*is_unsigned*/ true, kNoDexPc);
- HVecHalvingAdd* v2 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p0, p0, DataType::Type::kInt32, 4,
+ HVecHalvingAdd* v2 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
/*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* v3 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p0, p0, DataType::Type::kInt32, 4,
+ HVecHalvingAdd* v3 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
/*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* v4 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p0, p0, DataType::Type::kInt32, 2,
+ HVecHalvingAdd* v4 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 2,
/*is_rounded*/ true, /*is_unsigned*/ true, kNoDexPc);
- HVecHalvingAdd* v5 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p1, p1, DataType::Type::kUint8, 16,
+ HVecHalvingAdd* v5 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p1, p1, DataType::Type::kUint8, 16,
/*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* v6 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p1, p1, DataType::Type::kUint8, 16,
+ HVecHalvingAdd* v6 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p1, p1, DataType::Type::kUint8, 16,
/*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* v7 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p1, p1, DataType::Type::kInt8, 16,
+ HVecHalvingAdd* v7 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p1, p1, DataType::Type::kInt8, 16,
/*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* v8 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p1, p1, DataType::Type::kInt8, 16,
+ HVecHalvingAdd* v8 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p1, p1, DataType::Type::kInt8, 16,
/*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* v9 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p2, p2, DataType::Type::kUint16, 8,
+ HVecHalvingAdd* v9 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p2, p2, DataType::Type::kUint16, 8,
/*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* v10 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p2, p2, DataType::Type::kUint16, 8,
+ HVecHalvingAdd* v10 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p2, p2, DataType::Type::kUint16, 8,
/*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* v11 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p2, p2, DataType::Type::kInt16, 2,
+ HVecHalvingAdd* v11 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p2, p2, DataType::Type::kInt16, 2,
/*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* v12 = new (&allocator_) HVecHalvingAdd(
- &allocator_, p2, p2, DataType::Type::kInt16, 2,
+ HVecHalvingAdd* v12 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p2, p2, DataType::Type::kInt16, 2,
/*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
HVecHalvingAdd* hadd_insns[] = { v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12 };
@@ -460,15 +456,15 @@
}
TEST_F(NodesVectorTest, VectorOperationMattersOnMultiplyAccumulate) {
- HVecOperation* v0 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecOperation* v0 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
- HVecMultiplyAccumulate* v1 = new (&allocator_) HVecMultiplyAccumulate(
- &allocator_, HInstruction::kAdd, v0, v0, v0, DataType::Type::kInt32, 4, kNoDexPc);
- HVecMultiplyAccumulate* v2 = new (&allocator_) HVecMultiplyAccumulate(
- &allocator_, HInstruction::kSub, v0, v0, v0, DataType::Type::kInt32, 4, kNoDexPc);
- HVecMultiplyAccumulate* v3 = new (&allocator_) HVecMultiplyAccumulate(
- &allocator_, HInstruction::kAdd, v0, v0, v0, DataType::Type::kInt32, 2, kNoDexPc);
+ HVecMultiplyAccumulate* v1 = new (GetAllocator()) HVecMultiplyAccumulate(
+ GetAllocator(), HInstruction::kAdd, v0, v0, v0, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecMultiplyAccumulate* v2 = new (GetAllocator()) HVecMultiplyAccumulate(
+ GetAllocator(), HInstruction::kSub, v0, v0, v0, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecMultiplyAccumulate* v3 = new (GetAllocator()) HVecMultiplyAccumulate(
+ GetAllocator(), HInstruction::kAdd, v0, v0, v0, DataType::Type::kInt32, 2, kNoDexPc);
EXPECT_FALSE(v0->CanBeMoved());
EXPECT_TRUE(v1->CanBeMoved());
@@ -488,15 +484,15 @@
}
TEST_F(NodesVectorTest, VectorKindMattersOnReduce) {
- HVecOperation* v0 = new (&allocator_)
- HVecReplicateScalar(&allocator_, int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecOperation* v0 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
- HVecReduce* v1 = new (&allocator_) HVecReduce(
- &allocator_, v0, DataType::Type::kInt32, 4, HVecReduce::kSum, kNoDexPc);
- HVecReduce* v2 = new (&allocator_) HVecReduce(
- &allocator_, v0, DataType::Type::kInt32, 4, HVecReduce::kMin, kNoDexPc);
- HVecReduce* v3 = new (&allocator_) HVecReduce(
- &allocator_, v0, DataType::Type::kInt32, 4, HVecReduce::kMax, kNoDexPc);
+ HVecReduce* v1 = new (GetAllocator()) HVecReduce(
+ GetAllocator(), v0, DataType::Type::kInt32, 4, HVecReduce::kSum, kNoDexPc);
+ HVecReduce* v2 = new (GetAllocator()) HVecReduce(
+ GetAllocator(), v0, DataType::Type::kInt32, 4, HVecReduce::kMin, kNoDexPc);
+ HVecReduce* v3 = new (GetAllocator()) HVecReduce(
+ GetAllocator(), v0, DataType::Type::kInt32, 4, HVecReduce::kMax, kNoDexPc);
EXPECT_FALSE(v0->CanBeMoved());
EXPECT_TRUE(v1->CanBeMoved());
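// [Editor's note] A minimal sketch of the arena placement-new pattern these tests
// rely on; ArenaStub and NodeStub are illustrative stand-ins, not ART types. IR
// nodes route `new (GetAllocator()) T(...)` through the arena, so every node of a
// graph is released in one shot when the arena dies.
//
//   class ArenaStub {
//    public:
//     void* Alloc(size_t size);  // bump-pointer allocation in a real arena
//   };
//
//   class NodeStub {
//    public:
//     // Enables `new (arena) NodeStub(...)`; storage comes from the arena.
//     static void* operator new(size_t size, ArenaStub* arena) {
//       return arena->Alloc(size);
//     }
//     // Matching placement delete, used only if the constructor throws.
//     static void operator delete(void* ptr, ArenaStub* arena) {}
//   };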
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index 99d5284..bd65cbf 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -46,19 +46,20 @@
static constexpr bool kGenerateExpected = false;
OptimizingCFITest()
- : pool_(),
- allocator_(&pool_),
+ : pool_and_allocator_(),
opts_(),
isa_features_(),
graph_(nullptr),
code_gen_(),
- blocks_(allocator_.Adapter()) {}
+ blocks_(GetAllocator()->Adapter()) {}
+
+ ArenaAllocator* GetAllocator() { return pool_and_allocator_.GetAllocator(); }
void SetUpFrame(InstructionSet isa) {
// Setup simple context.
std::string error;
isa_features_ = InstructionSetFeatures::FromVariant(isa, "default", &error);
- graph_ = CreateGraph(&allocator_);
+ graph_ = CreateGraph(&pool_and_allocator_);
// Generate simple frame with some spills.
code_gen_ = CodeGenerator::Create(graph_, isa, *isa_features_, opts_);
code_gen_->GetAssembler()->cfi().SetEnabled(true);
@@ -142,8 +143,7 @@
DISALLOW_COPY_AND_ASSIGN(InternalCodeAllocator);
};
- ArenaPool pool_;
- ArenaAllocator allocator_;
+ ArenaPoolAndAllocator pool_and_allocator_;
CompilerOptions opts_;
std::unique_ptr<const InstructionSetFeatures> isa_features_;
HGraph* graph_;
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 1e06ea8..9bfb7a5 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -45,6 +45,7 @@
#include "base/dumpable.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "base/scoped_arena_allocator.h"
#include "base/timing_logger.h"
#include "bounds_check_elimination.h"
#include "builder.h"
@@ -108,8 +109,8 @@
*/
class CodeVectorAllocator FINAL : public CodeAllocator {
public:
- explicit CodeVectorAllocator(ArenaAllocator* arena)
- : memory_(arena->Adapter(kArenaAllocCodeBuffer)),
+ explicit CodeVectorAllocator(ArenaAllocator* allocator)
+ : memory_(allocator->Adapter(kArenaAllocCodeBuffer)),
size_(0) {}
virtual uint8_t* Allocate(size_t size) {
@@ -148,7 +149,7 @@
cached_method_name_(),
timing_logger_enabled_(compiler_driver->GetDumpPasses()),
timing_logger_(timing_logger_enabled_ ? GetMethodName() : "", true, true),
- disasm_info_(graph->GetArena()),
+ disasm_info_(graph->GetAllocator()),
visualizer_oss_(),
visualizer_output_(visualizer_output),
visualizer_enabled_(!compiler_driver->GetCompilerOptions().GetDumpCfgFileName().empty()),
@@ -351,7 +352,7 @@
private:
// Create a 'CompiledMethod' for an optimized graph.
- CompiledMethod* Emit(ArenaAllocator* arena,
+ CompiledMethod* Emit(ArenaAllocator* allocator,
CodeVectorAllocator* code_allocator,
CodeGenerator* codegen,
CompilerDriver* driver,
@@ -364,7 +365,8 @@
// 2) Transforms the graph to SSA. Returns null if it failed.
// 3) Runs optimizations on the graph, including register allocator.
// 4) Generates code with the `code_allocator` provided.
- CodeGenerator* TryCompile(ArenaAllocator* arena,
+ CodeGenerator* TryCompile(ArenaAllocator* allocator,
+ ArenaStack* arena_stack,
CodeVectorAllocator* code_allocator,
const DexFile::CodeItem* code_item,
uint32_t access_flags,
@@ -452,7 +454,7 @@
static HOptimization* BuildOptimization(
const std::string& pass_name,
- ArenaAllocator* arena,
+ ArenaAllocator* allocator,
HGraph* graph,
OptimizingCompilerStats* stats,
CodeGenerator* codegen,
@@ -465,78 +467,79 @@
std::string opt_name = ConvertPassNameToOptimizationName(pass_name);
if (opt_name == BoundsCheckElimination::kBoundsCheckEliminationPassName) {
CHECK(most_recent_side_effects != nullptr && most_recent_induction != nullptr);
- return new (arena) BoundsCheckElimination(graph,
- *most_recent_side_effects,
- most_recent_induction);
+ return new (allocator) BoundsCheckElimination(graph,
+ *most_recent_side_effects,
+ most_recent_induction);
} else if (opt_name == GVNOptimization::kGlobalValueNumberingPassName) {
CHECK(most_recent_side_effects != nullptr);
- return new (arena) GVNOptimization(graph, *most_recent_side_effects, pass_name.c_str());
+ return new (allocator) GVNOptimization(graph, *most_recent_side_effects, pass_name.c_str());
} else if (opt_name == HConstantFolding::kConstantFoldingPassName) {
- return new (arena) HConstantFolding(graph, pass_name.c_str());
+ return new (allocator) HConstantFolding(graph, pass_name.c_str());
} else if (opt_name == HDeadCodeElimination::kDeadCodeEliminationPassName) {
- return new (arena) HDeadCodeElimination(graph, stats, pass_name.c_str());
+ return new (allocator) HDeadCodeElimination(graph, stats, pass_name.c_str());
} else if (opt_name == HInliner::kInlinerPassName) {
size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_;
- return new (arena) HInliner(graph, // outer_graph
- graph, // outermost_graph
- codegen,
- dex_compilation_unit, // outer_compilation_unit
- dex_compilation_unit, // outermost_compilation_unit
- driver,
- handles,
- stats,
- number_of_dex_registers,
- /* total_number_of_instructions */ 0,
- /* parent */ nullptr);
+ return new (allocator) HInliner(graph, // outer_graph
+ graph, // outermost_graph
+ codegen,
+ dex_compilation_unit, // outer_compilation_unit
+ dex_compilation_unit, // outermost_compilation_unit
+ driver,
+ handles,
+ stats,
+ number_of_dex_registers,
+ /* total_number_of_instructions */ 0,
+ /* parent */ nullptr);
} else if (opt_name == HSharpening::kSharpeningPassName) {
- return new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver, handles);
+ return new (allocator) HSharpening(graph, codegen, dex_compilation_unit, driver, handles);
} else if (opt_name == HSelectGenerator::kSelectGeneratorPassName) {
- return new (arena) HSelectGenerator(graph, handles, stats);
+ return new (allocator) HSelectGenerator(graph, handles, stats);
} else if (opt_name == HInductionVarAnalysis::kInductionPassName) {
- return new (arena) HInductionVarAnalysis(graph);
+ return new (allocator) HInductionVarAnalysis(graph);
} else if (opt_name == InstructionSimplifier::kInstructionSimplifierPassName) {
- return new (arena) InstructionSimplifier(graph, codegen, driver, stats, pass_name.c_str());
+ return new (allocator) InstructionSimplifier(graph, codegen, driver, stats, pass_name.c_str());
} else if (opt_name == IntrinsicsRecognizer::kIntrinsicsRecognizerPassName) {
- return new (arena) IntrinsicsRecognizer(graph, stats);
+ return new (allocator) IntrinsicsRecognizer(graph, stats);
} else if (opt_name == LICM::kLoopInvariantCodeMotionPassName) {
CHECK(most_recent_side_effects != nullptr);
- return new (arena) LICM(graph, *most_recent_side_effects, stats);
+ return new (allocator) LICM(graph, *most_recent_side_effects, stats);
} else if (opt_name == LoadStoreAnalysis::kLoadStoreAnalysisPassName) {
- return new (arena) LoadStoreAnalysis(graph);
+ return new (allocator) LoadStoreAnalysis(graph);
} else if (opt_name == LoadStoreElimination::kLoadStoreEliminationPassName) {
CHECK(most_recent_side_effects != nullptr);
CHECK(most_recent_lsa != nullptr);
- return
- new (arena) LoadStoreElimination(graph, *most_recent_side_effects, *most_recent_lsa, stats);
+ return new (allocator) LoadStoreElimination(graph,
+ *most_recent_side_effects,
+ *most_recent_lsa,
+ stats);
} else if (opt_name == SideEffectsAnalysis::kSideEffectsAnalysisPassName) {
- return new (arena) SideEffectsAnalysis(graph);
+ return new (allocator) SideEffectsAnalysis(graph);
} else if (opt_name == HLoopOptimization::kLoopOptimizationPassName) {
- return new (arena) HLoopOptimization(graph, driver, most_recent_induction, stats);
+ return new (allocator) HLoopOptimization(graph, driver, most_recent_induction, stats);
} else if (opt_name == CHAGuardOptimization::kCHAGuardOptimizationPassName) {
- return new (arena) CHAGuardOptimization(graph);
+ return new (allocator) CHAGuardOptimization(graph);
} else if (opt_name == CodeSinking::kCodeSinkingPassName) {
- return new (arena) CodeSinking(graph, stats);
+ return new (allocator) CodeSinking(graph, stats);
} else if (opt_name == ConstructorFenceRedundancyElimination::kPassName) {
- return new (arena) ConstructorFenceRedundancyElimination(graph, stats);
+ return new (allocator) ConstructorFenceRedundancyElimination(graph, stats);
#ifdef ART_ENABLE_CODEGEN_arm
} else if (opt_name == arm::InstructionSimplifierArm::kInstructionSimplifierArmPassName) {
- return new (arena) arm::InstructionSimplifierArm(graph, stats);
+ return new (allocator) arm::InstructionSimplifierArm(graph, stats);
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
} else if (opt_name == arm64::InstructionSimplifierArm64::kInstructionSimplifierArm64PassName) {
- return new (arena) arm64::InstructionSimplifierArm64(graph, stats);
+ return new (allocator) arm64::InstructionSimplifierArm64(graph, stats);
#endif
#ifdef ART_ENABLE_CODEGEN_mips
} else if (opt_name == mips::PcRelativeFixups::kPcRelativeFixupsMipsPassName) {
- return new (arena) mips::PcRelativeFixups(graph, codegen, stats);
+ return new (allocator) mips::PcRelativeFixups(graph, codegen, stats);
} else if (opt_name == mips::InstructionSimplifierMips::kInstructionSimplifierMipsPassName) {
- return new (arena) mips::InstructionSimplifierMips(graph, codegen, stats);
+ return new (allocator) mips::InstructionSimplifierMips(graph, codegen, stats);
#endif
#ifdef ART_ENABLE_CODEGEN_x86
} else if (opt_name == x86::PcRelativeFixups::kPcRelativeFixupsX86PassName) {
- return new (arena) x86::PcRelativeFixups(graph, codegen, stats);
+ return new (allocator) x86::PcRelativeFixups(graph, codegen, stats);
} else if (opt_name == x86::X86MemoryOperandGeneration::kX86MemoryOperandGenerationPassName) {
- return new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats);
+ return new (allocator) x86::X86MemoryOperandGeneration(graph, codegen, stats);
#endif
}
return nullptr;
@@ -544,7 +547,7 @@
static ArenaVector<HOptimization*> BuildOptimizations(
const std::vector<std::string>& pass_names,
- ArenaAllocator* arena,
+ ArenaAllocator* allocator,
HGraph* graph,
OptimizingCompilerStats* stats,
CodeGenerator* codegen,
@@ -557,11 +560,11 @@
SideEffectsAnalysis* most_recent_side_effects = nullptr;
HInductionVarAnalysis* most_recent_induction = nullptr;
LoadStoreAnalysis* most_recent_lsa = nullptr;
- ArenaVector<HOptimization*> ret(arena->Adapter());
+ ArenaVector<HOptimization*> ret(allocator->Adapter());
for (const std::string& pass_name : pass_names) {
HOptimization* opt = BuildOptimization(
pass_name,
- arena,
+ allocator,
graph,
stats,
codegen,
@@ -608,7 +611,7 @@
return;
}
size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_;
- HInliner* inliner = new (graph->GetArena()) HInliner(
+ HInliner* inliner = new (graph->GetAllocator()) HInliner(
graph, // outer_graph
graph, // outermost_graph
codegen,
@@ -631,17 +634,18 @@
PassObserver* pass_observer) const {
UNUSED(codegen); // To avoid compilation error when compiling for svelte
OptimizingCompilerStats* stats = compilation_stats_.get();
- ArenaAllocator* arena = graph->GetArena();
+ ArenaAllocator* allocator = graph->GetAllocator();
switch (instruction_set) {
#if defined(ART_ENABLE_CODEGEN_arm)
case kThumb2:
case kArm: {
arm::InstructionSimplifierArm* simplifier =
- new (arena) arm::InstructionSimplifierArm(graph, stats);
- SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
+ new (allocator) arm::InstructionSimplifierArm(graph, stats);
+ SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
+ GVNOptimization* gvn =
+ new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
HInstructionScheduling* scheduling =
- new (arena) HInstructionScheduling(graph, instruction_set, codegen);
+ new (allocator) HInstructionScheduling(graph, instruction_set, codegen);
HOptimization* arm_optimizations[] = {
simplifier,
side_effects,
@@ -655,11 +659,12 @@
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64: {
arm64::InstructionSimplifierArm64* simplifier =
- new (arena) arm64::InstructionSimplifierArm64(graph, stats);
- SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
+ new (allocator) arm64::InstructionSimplifierArm64(graph, stats);
+ SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
+ GVNOptimization* gvn =
+ new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
HInstructionScheduling* scheduling =
- new (arena) HInstructionScheduling(graph, instruction_set);
+ new (allocator) HInstructionScheduling(graph, instruction_set);
HOptimization* arm64_optimizations[] = {
simplifier,
side_effects,
@@ -673,11 +678,12 @@
#ifdef ART_ENABLE_CODEGEN_mips
case kMips: {
mips::InstructionSimplifierMips* simplifier =
- new (arena) mips::InstructionSimplifierMips(graph, codegen, stats);
- SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
+ new (allocator) mips::InstructionSimplifierMips(graph, codegen, stats);
+ SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
+ GVNOptimization* gvn =
+ new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
mips::PcRelativeFixups* pc_relative_fixups =
- new (arena) mips::PcRelativeFixups(graph, codegen, stats);
+ new (allocator) mips::PcRelativeFixups(graph, codegen, stats);
HOptimization* mips_optimizations[] = {
simplifier,
side_effects,
@@ -690,8 +696,9 @@
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64: {
- SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
+ SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
+ GVNOptimization* gvn =
+ new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
HOptimization* mips64_optimizations[] = {
side_effects,
gvn,
@@ -702,12 +709,13 @@
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case kX86: {
- SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
+ SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
+ GVNOptimization* gvn =
+ new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
x86::PcRelativeFixups* pc_relative_fixups =
- new (arena) x86::PcRelativeFixups(graph, codegen, stats);
+ new (allocator) x86::PcRelativeFixups(graph, codegen, stats);
x86::X86MemoryOperandGeneration* memory_gen =
- new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats);
+ new (allocator) x86::X86MemoryOperandGeneration(graph, codegen, stats);
HOptimization* x86_optimizations[] = {
side_effects,
gvn,
@@ -720,10 +728,11 @@
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64: {
- SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
+ SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
+ GVNOptimization* gvn =
+ new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
x86::X86MemoryOperandGeneration* memory_gen =
- new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats);
+ new (allocator) x86::X86MemoryOperandGeneration(graph, codegen, stats);
HOptimization* x86_64_optimizations[] = {
side_effects,
gvn,
@@ -749,14 +758,19 @@
pass_observer);
PrepareForRegisterAllocation(graph, stats).Run();
}
- SsaLivenessAnalysis liveness(graph, codegen);
+ // Use local allocator shared by SSA liveness analysis and register allocator.
+ // (Register allocator creates new objects in the liveness data.)
+ ScopedArenaAllocator local_allocator(graph->GetArenaStack());
+ SsaLivenessAnalysis liveness(graph, codegen, &local_allocator);
{
PassScope scope(SsaLivenessAnalysis::kLivenessPassName, pass_observer);
liveness.Analyze();
}
{
PassScope scope(RegisterAllocator::kRegisterAllocatorPassName, pass_observer);
- RegisterAllocator::Create(graph->GetArena(), codegen, liveness, strategy)->AllocateRegisters();
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(&local_allocator, codegen, liveness, strategy);
+ register_allocator->AllocateRegisters();
}
}
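// [Editor's note] A hedged restatement of the lifetime introduced above:
// ScopedArenaAllocator releases everything it served when it leaves scope, so the
// liveness data and the register allocator share one lifetime and neither may
// escape the enclosing function.
//
//   {
//     ScopedArenaAllocator local_allocator(graph->GetArenaStack());
//     SsaLivenessAnalysis liveness(graph, codegen, &local_allocator);
//     liveness.Analyze();
//     RegisterAllocator::Create(&local_allocator, codegen, liveness, strategy)
//         ->AllocateRegisters();
//   }  // Arena stack frame popped; all liveness/allocator memory reclaimed.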
@@ -767,11 +781,11 @@
PassObserver* pass_observer,
VariableSizedHandleScope* handles) const {
OptimizingCompilerStats* stats = compilation_stats_.get();
- ArenaAllocator* arena = graph->GetArena();
+ ArenaAllocator* allocator = graph->GetAllocator();
if (driver->GetCompilerOptions().GetPassesToRun() != nullptr) {
ArenaVector<HOptimization*> optimizations = BuildOptimizations(
*driver->GetCompilerOptions().GetPassesToRun(),
- arena,
+ allocator,
graph,
stats,
codegen,
@@ -782,43 +796,45 @@
return;
}
- HDeadCodeElimination* dce1 = new (arena) HDeadCodeElimination(
+ HDeadCodeElimination* dce1 = new (allocator) HDeadCodeElimination(
graph, stats, "dead_code_elimination$initial");
- HDeadCodeElimination* dce2 = new (arena) HDeadCodeElimination(
+ HDeadCodeElimination* dce2 = new (allocator) HDeadCodeElimination(
graph, stats, "dead_code_elimination$after_inlining");
- HDeadCodeElimination* dce3 = new (arena) HDeadCodeElimination(
+ HDeadCodeElimination* dce3 = new (allocator) HDeadCodeElimination(
graph, stats, "dead_code_elimination$final");
- HConstantFolding* fold1 = new (arena) HConstantFolding(graph, "constant_folding");
- InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(
+ HConstantFolding* fold1 = new (allocator) HConstantFolding(graph, "constant_folding");
+ InstructionSimplifier* simplify1 = new (allocator) InstructionSimplifier(
graph, codegen, driver, stats);
- HSelectGenerator* select_generator = new (arena) HSelectGenerator(graph, handles, stats);
- HConstantFolding* fold2 = new (arena) HConstantFolding(
+ HSelectGenerator* select_generator = new (allocator) HSelectGenerator(graph, handles, stats);
+ HConstantFolding* fold2 = new (allocator) HConstantFolding(
graph, "constant_folding$after_inlining");
- HConstantFolding* fold3 = new (arena) HConstantFolding(graph, "constant_folding$after_bce");
- SideEffectsAnalysis* side_effects1 = new (arena) SideEffectsAnalysis(
+ HConstantFolding* fold3 = new (allocator) HConstantFolding(graph, "constant_folding$after_bce");
+ SideEffectsAnalysis* side_effects1 = new (allocator) SideEffectsAnalysis(
graph, "side_effects$before_gvn");
- SideEffectsAnalysis* side_effects2 = new (arena) SideEffectsAnalysis(
+ SideEffectsAnalysis* side_effects2 = new (allocator) SideEffectsAnalysis(
graph, "side_effects$before_lse");
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects1);
- LICM* licm = new (arena) LICM(graph, *side_effects1, stats);
- HInductionVarAnalysis* induction = new (arena) HInductionVarAnalysis(graph);
- BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, *side_effects1, induction);
- HLoopOptimization* loop = new (arena) HLoopOptimization(graph, driver, induction, stats);
- LoadStoreAnalysis* lsa = new (arena) LoadStoreAnalysis(graph);
- LoadStoreElimination* lse = new (arena) LoadStoreElimination(graph, *side_effects2, *lsa, stats);
- HSharpening* sharpening = new (arena) HSharpening(
+ GVNOptimization* gvn = new (allocator) GVNOptimization(graph, *side_effects1);
+ LICM* licm = new (allocator) LICM(graph, *side_effects1, stats);
+ HInductionVarAnalysis* induction = new (allocator) HInductionVarAnalysis(graph);
+ BoundsCheckElimination* bce =
+ new (allocator) BoundsCheckElimination(graph, *side_effects1, induction);
+ HLoopOptimization* loop = new (allocator) HLoopOptimization(graph, driver, induction, stats);
+ LoadStoreAnalysis* lsa = new (allocator) LoadStoreAnalysis(graph);
+ LoadStoreElimination* lse =
+ new (allocator) LoadStoreElimination(graph, *side_effects2, *lsa, stats);
+ HSharpening* sharpening = new (allocator) HSharpening(
graph, codegen, dex_compilation_unit, driver, handles);
- InstructionSimplifier* simplify2 = new (arena) InstructionSimplifier(
+ InstructionSimplifier* simplify2 = new (allocator) InstructionSimplifier(
graph, codegen, driver, stats, "instruction_simplifier$after_inlining");
- InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
+ InstructionSimplifier* simplify3 = new (allocator) InstructionSimplifier(
graph, codegen, driver, stats, "instruction_simplifier$after_bce");
- InstructionSimplifier* simplify4 = new (arena) InstructionSimplifier(
+ InstructionSimplifier* simplify4 = new (allocator) InstructionSimplifier(
graph, codegen, driver, stats, "instruction_simplifier$before_codegen");
- IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, stats);
- CHAGuardOptimization* cha_guard = new (arena) CHAGuardOptimization(graph);
- CodeSinking* code_sinking = new (arena) CodeSinking(graph, stats);
+ IntrinsicsRecognizer* intrinsics = new (allocator) IntrinsicsRecognizer(graph, stats);
+ CHAGuardOptimization* cha_guard = new (allocator) CHAGuardOptimization(graph);
+ CodeSinking* code_sinking = new (allocator) CodeSinking(graph, stats);
ConstructorFenceRedundancyElimination* cfre =
- new (arena) ConstructorFenceRedundancyElimination(graph, stats);
+ new (allocator) ConstructorFenceRedundancyElimination(graph, stats);
HOptimization* optimizations1[] = {
intrinsics,
@@ -865,7 +881,7 @@
}
static ArenaVector<linker::LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator* codegen) {
- ArenaVector<linker::LinkerPatch> linker_patches(codegen->GetGraph()->GetArena()->Adapter());
+ ArenaVector<linker::LinkerPatch> linker_patches(codegen->GetGraph()->GetAllocator()->Adapter());
codegen->EmitLinkerPatches(&linker_patches);
// Sort patches by literal offset. Required for .oat_patches encoding.
@@ -877,14 +893,14 @@
return linker_patches;
}
-CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* arena,
+CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator,
CodeVectorAllocator* code_allocator,
CodeGenerator* codegen,
CompilerDriver* compiler_driver,
const DexFile::CodeItem* code_item) const {
ArenaVector<linker::LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
- ArenaVector<uint8_t> stack_map(arena->Adapter(kArenaAllocStackMaps));
- ArenaVector<uint8_t> method_info(arena->Adapter(kArenaAllocStackMaps));
+ ArenaVector<uint8_t> stack_map(allocator->Adapter(kArenaAllocStackMaps));
+ ArenaVector<uint8_t> method_info(allocator->Adapter(kArenaAllocStackMaps));
size_t stack_map_size = 0;
size_t method_info_size = 0;
codegen->ComputeStackMapAndMethodInfoSize(&stack_map_size, &method_info_size);
@@ -912,7 +928,8 @@
return compiled_method;
}
-CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
+CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
+ ArenaStack* arena_stack,
CodeVectorAllocator* code_allocator,
const DexFile::CodeItem* code_item,
uint32_t access_flags,
@@ -970,8 +987,9 @@
/* verified_method */ nullptr,
dex_cache);
- HGraph* graph = new (arena) HGraph(
- arena,
+ HGraph* graph = new (allocator) HGraph(
+ allocator,
+ arena_stack,
dex_file,
method_idx,
compiler_driver->GetInstructionSet(),
@@ -1024,7 +1042,6 @@
codegen.get(),
compilation_stats_.get(),
interpreter_metadata,
- dex_cache,
handles);
GraphAnalysisResult result = builder.BuildGraph();
if (result != kAnalysisSuccess) {
@@ -1091,11 +1108,12 @@
DCHECK(Runtime::Current()->IsAotCompiler());
const VerifiedMethod* verified_method = compiler_driver->GetVerifiedMethod(&dex_file, method_idx);
DCHECK(!verified_method->HasRuntimeThrow());
- if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file)
- || verifier::CanCompilerHandleVerificationFailure(
- verified_method->GetEncounteredVerificationFailures())) {
- ArenaAllocator arena(Runtime::Current()->GetArenaPool());
- CodeVectorAllocator code_allocator(&arena);
+ if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file) ||
+ verifier::CanCompilerHandleVerificationFailure(
+ verified_method->GetEncounteredVerificationFailures())) {
+ ArenaAllocator allocator(Runtime::Current()->GetArenaPool());
+ ArenaStack arena_stack(Runtime::Current()->GetArenaPool());
+ CodeVectorAllocator code_allocator(&allocator);
std::unique_ptr<CodeGenerator> codegen;
{
ScopedObjectAccess soa(Thread::Current());
@@ -1103,7 +1121,8 @@
// Go to native so that we don't block GC during compilation.
ScopedThreadSuspension sts(soa.Self(), kNative);
codegen.reset(
- TryCompile(&arena,
+ TryCompile(&allocator,
+ &arena_stack,
&code_allocator,
code_item,
access_flags,
@@ -1120,12 +1139,16 @@
if (codegen.get() != nullptr) {
MaybeRecordStat(compilation_stats_.get(),
MethodCompilationStat::kCompiled);
- method = Emit(&arena, &code_allocator, codegen.get(), compiler_driver, code_item);
+ method = Emit(&allocator, &code_allocator, codegen.get(), compiler_driver, code_item);
if (kArenaAllocatorCountAllocations) {
- if (arena.BytesAllocated() > kArenaAllocatorMemoryReportThreshold) {
- MemStats mem_stats(arena.GetMemStats());
- LOG(INFO) << dex_file.PrettyMethod(method_idx) << " " << Dumpable<MemStats>(mem_stats);
+ size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated();
+ if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
+ MemStats mem_stats(allocator.GetMemStats());
+ MemStats peak_stats(arena_stack.GetPeakStats());
+ LOG(INFO) << dex_file.PrettyMethod(method_idx)
+ << "\n" << Dumpable<MemStats>(mem_stats)
+ << "\n" << Dumpable<MemStats>(peak_stats);
}
}
}
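// [Editor's note, assumption] Arena-stack memory is pushed and popped while the
// method compiles, so its final size understates real usage; hence the report sums
// the allocator's total with the stack's high-water mark. Hypothetical numbers:
//
//   allocator.BytesAllocated()       // e.g. 300 KiB, retained for the whole compile
//   arena_stack.PeakBytesAllocated() // e.g. 900 KiB, peak of scoped, freed-early use
//   total_allocated                  // 1.2 MiB, checked against the report threshold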
@@ -1200,8 +1223,9 @@
const uint32_t access_flags = method->GetAccessFlags();
const InvokeType invoke_type = method->GetInvokeType();
- ArenaAllocator arena(Runtime::Current()->GetJitArenaPool());
- CodeVectorAllocator code_allocator(&arena);
+ ArenaAllocator allocator(Runtime::Current()->GetJitArenaPool());
+ ArenaStack arena_stack(Runtime::Current()->GetJitArenaPool());
+ CodeVectorAllocator code_allocator(&allocator);
VariableSizedHandleScope handles(self);
std::unique_ptr<CodeGenerator> codegen;
@@ -1209,7 +1233,8 @@
// Go to native so that we don't block GC during compilation.
ScopedThreadSuspension sts(self, kNative);
codegen.reset(
- TryCompile(&arena,
+ TryCompile(&allocator,
+ &arena_stack,
&code_allocator,
code_item,
access_flags,
@@ -1227,9 +1252,13 @@
}
if (kArenaAllocatorCountAllocations) {
- if (arena.BytesAllocated() > kArenaAllocatorMemoryReportThreshold) {
- MemStats mem_stats(arena.GetMemStats());
- LOG(INFO) << dex_file->PrettyMethod(method_idx) << " " << Dumpable<MemStats>(mem_stats);
+ size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated();
+ if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
+ MemStats mem_stats(allocator.GetMemStats());
+ MemStats peak_stats(arena_stack.GetPeakStats());
+ LOG(INFO) << dex_file->PrettyMethod(method_idx)
+ << "\n" << Dumpable<MemStats>(mem_stats)
+ << "\n" << Dumpable<MemStats>(peak_stats);
}
}
}
@@ -1321,7 +1350,7 @@
CreateJITCodeEntryForAddress(code_address, std::move(elf_file));
}
- Runtime::Current()->GetJit()->AddMemoryUsage(method, arena.BytesUsed());
+ Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
if (jit_logger != nullptr) {
jit_logger->WriteLog(code, code_allocator.GetSize(), method);
}
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 33f1a4a..5632f9a 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
#define ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
+#include "base/scoped_arena_allocator.h"
#include "builder.h"
#include "common_compiler_test.h"
#include "dex_file.h"
@@ -47,7 +48,7 @@
LiveInterval* BuildInterval(const size_t ranges[][2],
size_t number_of_ranges,
- ArenaAllocator* allocator,
+ ScopedArenaAllocator* allocator,
int reg = -1,
HInstruction* defined_by = nullptr) {
LiveInterval* interval =
@@ -78,30 +79,69 @@
}
}
-inline HGraph* CreateGraph(ArenaAllocator* allocator) {
- return new (allocator) HGraph(
- allocator,
- *reinterpret_cast<DexFile*>(allocator->Alloc(sizeof(DexFile))),
+class ArenaPoolAndAllocator {
+ public:
+ ArenaPoolAndAllocator()
+ : pool_(), allocator_(&pool_), arena_stack_(&pool_), scoped_allocator_(&arena_stack_) { }
+
+ ArenaAllocator* GetAllocator() { return &allocator_; }
+ ArenaStack* GetArenaStack() { return &arena_stack_; }
+ ScopedArenaAllocator* GetScopedAllocator() { return &scoped_allocator_; }
+
+ private:
+ ArenaPool pool_;
+ ArenaAllocator allocator_;
+ ArenaStack arena_stack_;
+ ScopedArenaAllocator scoped_allocator_;
+};
+
+inline HGraph* CreateGraph(ArenaPoolAndAllocator* pool_and_allocator) {
+ return new (pool_and_allocator->GetAllocator()) HGraph(
+ pool_and_allocator->GetAllocator(),
+ pool_and_allocator->GetArenaStack(),
+ *reinterpret_cast<DexFile*>(pool_and_allocator->GetAllocator()->Alloc(sizeof(DexFile))),
/*method_idx*/-1,
kRuntimeISA);
}
-// Create a control-flow graph from Dex instructions.
-inline HGraph* CreateCFG(ArenaAllocator* allocator,
- const uint16_t* data,
- DataType::Type return_type = DataType::Type::kInt32) {
- const DexFile::CodeItem* item =
- reinterpret_cast<const DexFile::CodeItem*>(data);
- HGraph* graph = CreateGraph(allocator);
+class OptimizingUnitTest : public CommonCompilerTest {
+ protected:
+ OptimizingUnitTest() : pool_and_allocator_(new ArenaPoolAndAllocator()) { }
- {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope handles(soa.Self());
- HGraphBuilder builder(graph, *item, &handles, return_type);
- bool graph_built = (builder.BuildGraph() == kAnalysisSuccess);
- return graph_built ? graph : nullptr;
+ ArenaAllocator* GetAllocator() { return pool_and_allocator_->GetAllocator(); }
+ ArenaStack* GetArenaStack() { return pool_and_allocator_->GetArenaStack(); }
+ ScopedArenaAllocator* GetScopedAllocator() { return pool_and_allocator_->GetScopedAllocator(); }
+
+ void ResetPoolAndAllocator() {
+ pool_and_allocator_.reset(new ArenaPoolAndAllocator());
+ handles_.reset(); // When getting rid of the old HGraph, we can also reset handles_.
}
-}
+
+ HGraph* CreateGraph() {
+ return art::CreateGraph(pool_and_allocator_.get());
+ }
+
+ // Create a control-flow graph from Dex instructions.
+ HGraph* CreateCFG(const uint16_t* data, DataType::Type return_type = DataType::Type::kInt32) {
+ const DexFile::CodeItem* item =
+ reinterpret_cast<const DexFile::CodeItem*>(data);
+ HGraph* graph = CreateGraph();
+
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ if (handles_ == nullptr) {
+ handles_.reset(new VariableSizedHandleScope(soa.Self()));
+ }
+ HGraphBuilder builder(graph, *item, handles_.get(), return_type);
+ bool graph_built = (builder.BuildGraph() == kAnalysisSuccess);
+ return graph_built ? graph : nullptr;
+ }
+ }
+
+ private:
+ std::unique_ptr<ArenaPoolAndAllocator> pool_and_allocator_;
+ std::unique_ptr<VariableSizedHandleScope> handles_;
+};
// Naive string diff data type.
typedef std::list<std::pair<std::string, std::string>> diff_t;
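// [Editor's note] A hypothetical fixture sketching how the OptimizingUnitTest
// helpers above compose; ExampleTest and its payload are illustrative only.
//
//   class ExampleTest : public OptimizingUnitTest {};
//
//   TEST_F(ExampleTest, BuildsGraphFromDexCode) {
//     const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(Instruction::RETURN_VOID);
//     HGraph* graph = CreateCFG(data);  // draws on the fixture's pool, arena
//     ASSERT_NE(graph, nullptr);        // stack, and lazily created handle scope
//   }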
diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc
index e569b78..9d53585 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.cc
+++ b/compiler/optimizing/pc_relative_fixups_mips.cc
@@ -52,7 +52,7 @@
}
// Insert the base at the start of the entry block, move it to a better
// position later in MoveBaseIfNeeded().
- base_ = new (GetGraph()->GetArena()) HMipsComputeBaseMethodAddress();
+ base_ = new (GetGraph()->GetAllocator()) HMipsComputeBaseMethodAddress();
HBasicBlock* entry_block = GetGraph()->GetEntryBlock();
entry_block->InsertInstructionBefore(base_, entry_block->GetFirstInstruction());
DCHECK(base_ != nullptr);
@@ -112,7 +112,7 @@
InitializePCRelativeBasePointer();
HGraph* graph = GetGraph();
HBasicBlock* block = switch_insn->GetBlock();
- HMipsPackedSwitch* mips_switch = new (graph->GetArena()) HMipsPackedSwitch(
+ HMipsPackedSwitch* mips_switch = new (graph->GetAllocator()) HMipsPackedSwitch(
switch_insn->GetStartValue(),
switch_insn->GetNumEntries(),
switch_insn->InputAt(0),
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index a114e78..f92f4b2 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -137,7 +137,7 @@
HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(neg);
HGraph* graph = GetGraph();
HBasicBlock* block = neg->GetBlock();
- HX86FPNeg* x86_fp_neg = new (graph->GetArena()) HX86FPNeg(
+ HX86FPNeg* x86_fp_neg = new (graph->GetAllocator()) HX86FPNeg(
neg->GetType(),
neg->InputAt(0),
method_address,
@@ -156,7 +156,7 @@
HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(switch_insn);
HGraph* graph = GetGraph();
HBasicBlock* block = switch_insn->GetBlock();
- HX86PackedSwitch* x86_switch = new (graph->GetArena()) HX86PackedSwitch(
+ HX86PackedSwitch* x86_switch = new (graph->GetAllocator()) HX86PackedSwitch(
switch_insn->GetStartValue(),
switch_insn->GetNumEntries(),
switch_insn->InputAt(0),
@@ -176,7 +176,7 @@
// Insert the base at the start of the entry block, move it to a better
// position later in MoveBaseIfNeeded().
HX86ComputeBaseMethodAddress* method_address =
- new (GetGraph()->GetArena()) HX86ComputeBaseMethodAddress();
+ new (GetGraph()->GetAllocator()) HX86ComputeBaseMethodAddress();
if (has_irreducible_loops) {
cursor->GetBlock()->InsertInstructionBefore(method_address, cursor);
} else {
@@ -190,7 +190,7 @@
void ReplaceInput(HInstruction* insn, HConstant* value, int input_index, bool materialize) {
HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(insn);
HX86LoadFromConstantTable* load_constant =
- new (GetGraph()->GetArena()) HX86LoadFromConstantTable(method_address, value);
+ new (GetGraph()->GetAllocator()) HX86LoadFromConstantTable(method_address, value);
if (!materialize) {
load_constant->MarkEmittedAtUseSite();
}
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index b52de36..fe98aa9 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -56,12 +56,12 @@
// Add a fake environment for String.charAt() inline info as we want
// the exception to appear as being thrown from there.
ArtMethod* char_at_method = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
- ArenaAllocator* arena = GetGraph()->GetArena();
- HEnvironment* environment = new (arena) HEnvironment(arena,
- /* number_of_vregs */ 0u,
- char_at_method,
- /* dex_pc */ dex::kDexNoIndex,
- check);
+ ArenaAllocator* allocator = GetGraph()->GetAllocator();
+ HEnvironment* environment = new (allocator) HEnvironment(allocator,
+ /* number_of_vregs */ 0u,
+ char_at_method,
+ /* dex_pc */ dex::kDexNoIndex,
+ check);
check->InsertRawEnvironment(environment);
}
}
diff --git a/compiler/optimizing/pretty_printer_test.cc b/compiler/optimizing/pretty_printer_test.cc
index 14d2360..4aec6d3 100644
--- a/compiler/optimizing/pretty_printer_test.cc
+++ b/compiler/optimizing/pretty_printer_test.cc
@@ -27,17 +27,18 @@
namespace art {
-static void TestCode(const uint16_t* data, const char* expected) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+class PrettyPrinterTest : public OptimizingUnitTest {
+ protected:
+ void TestCode(const uint16_t* data, const char* expected);
+};
+
+void PrettyPrinterTest::TestCode(const uint16_t* data, const char* expected) {
+ HGraph* graph = CreateCFG(data);
StringPrettyPrinter printer(graph);
printer.VisitInsertionOrder();
ASSERT_STREQ(expected, printer.str().c_str());
}
-class PrettyPrinterTest : public CommonCompilerTest {};
-
TEST_F(PrettyPrinterTest, ReturnVoid) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID);
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index f5064c3..6d9ebc8 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -122,7 +122,7 @@
class_loader_(class_loader),
hint_dex_cache_(hint_dex_cache),
handle_cache_(handles),
- worklist_(graph->GetArena()->Adapter(kArenaAllocReferenceTypePropagation)),
+ worklist_(graph->GetAllocator()->Adapter(kArenaAllocReferenceTypePropagation)),
is_first_run_(is_first_run) {
}
@@ -235,7 +235,7 @@
: start_block->GetFirstInstruction();
if (ShouldCreateBoundType(
insert_point, receiver, class_rti, start_instruction, start_block)) {
- bound_type = new (receiver->GetBlock()->GetGraph()->GetArena()) HBoundType(receiver);
+ bound_type = new (receiver->GetBlock()->GetGraph()->GetAllocator()) HBoundType(receiver);
bound_type->SetUpperBound(class_rti, /* bound_can_be_null */ false);
start_block->InsertInstructionBefore(bound_type, insert_point);
// To comply with the RTP algorithm, don't type the bound type just yet, it will
diff --git a/compiler/optimizing/reference_type_propagation_test.cc b/compiler/optimizing/reference_type_propagation_test.cc
index cb2af91..028b6d3b7 100644
--- a/compiler/optimizing/reference_type_propagation_test.cc
+++ b/compiler/optimizing/reference_type_propagation_test.cc
@@ -28,22 +28,20 @@
* Fixture class for unit testing the ReferenceTypePropagation phase. Used to verify the
* functionality of methods and situations that are hard to set up with checker tests.
*/
-class ReferenceTypePropagationTest : public CommonCompilerTest {
+class ReferenceTypePropagationTest : public OptimizingUnitTest {
public:
- ReferenceTypePropagationTest() : pool_(), allocator_(&pool_), propagation_(nullptr) {
- graph_ = CreateGraph(&allocator_);
- }
+ ReferenceTypePropagationTest() : graph_(CreateGraph()), propagation_(nullptr) { }
~ReferenceTypePropagationTest() { }
void SetupPropagation(VariableSizedHandleScope* handles) {
graph_->InitializeInexactObjectRTI(handles);
- propagation_ = new (&allocator_) ReferenceTypePropagation(graph_,
- Handle<mirror::ClassLoader>(),
- Handle<mirror::DexCache>(),
- handles,
- true,
- "test_prop");
+ propagation_ = new (GetAllocator()) ReferenceTypePropagation(graph_,
+ Handle<mirror::ClassLoader>(),
+ Handle<mirror::DexCache>(),
+ handles,
+ true,
+ "test_prop");
}
// Relay method to merge type in reference type propagation.
@@ -68,8 +66,6 @@
}
// General building fields.
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
ReferenceTypePropagation* propagation_;
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index 1786aa7..5ed9e02 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -22,10 +22,9 @@
namespace art {
-RegisterAllocationResolver::RegisterAllocationResolver(ArenaAllocator* allocator,
- CodeGenerator* codegen,
+RegisterAllocationResolver::RegisterAllocationResolver(CodeGenerator* codegen,
const SsaLivenessAnalysis& liveness)
- : allocator_(allocator),
+ : allocator_(codegen->GetGraph()->GetAllocator()),
codegen_(codegen),
liveness_(liveness) {}
@@ -36,7 +35,7 @@
size_t float_spill_slots,
size_t double_spill_slots,
size_t catch_phi_spill_slots,
- const ArenaVector<LiveInterval*>& temp_intervals) {
+ ArrayRef<LiveInterval* const> temp_intervals) {
size_t spill_slots = int_spill_slots
+ long_spill_slots
+ float_spill_slots
diff --git a/compiler/optimizing/register_allocation_resolver.h b/compiler/optimizing/register_allocation_resolver.h
index 4a148e0..2783717 100644
--- a/compiler/optimizing/register_allocation_resolver.h
+++ b/compiler/optimizing/register_allocation_resolver.h
@@ -17,7 +17,6 @@
#ifndef ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATION_RESOLVER_H_
#define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATION_RESOLVER_H_
-#include "base/arena_containers.h"
#include "base/array_ref.h"
#include "base/value_object.h"
#include "data_type.h"
@@ -40,9 +39,7 @@
*/
class RegisterAllocationResolver : ValueObject {
public:
- RegisterAllocationResolver(ArenaAllocator* allocator,
- CodeGenerator* codegen,
- const SsaLivenessAnalysis& liveness);
+ RegisterAllocationResolver(CodeGenerator* codegen, const SsaLivenessAnalysis& liveness);
void Resolve(ArrayRef<HInstruction* const> safepoints,
size_t reserved_out_slots, // Includes slot(s) for the art method.
@@ -51,7 +48,7 @@
size_t float_spill_slots,
size_t double_spill_slots,
size_t catch_phi_spill_slots,
- const ArenaVector<LiveInterval*>& temp_intervals);
+ ArrayRef<LiveInterval* const> temp_intervals);
private:
// Update live registers of safepoint location summary.
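// [Editor's note] Why ArrayRef: it is a non-owning (pointer, length) view, so the
// resolver's signature no longer bakes in the caller's container type or its
// allocator. A minimal sketch with a hypothetical helper:
//
//   size_t CountTemps(ArrayRef<LiveInterval* const> temp_intervals) {
//     return temp_intervals.size();  // reads through the view; owns nothing
//   }
//
//   std::vector<LiveInterval*> temps;  // any contiguous storage works
//   size_t n = CountTemps(ArrayRef<LiveInterval* const>(temps));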
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index c3b33e2..ece9904 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -19,6 +19,8 @@
#include <iostream>
#include <sstream>
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "base/bit_vector-inl.h"
#include "code_generator.h"
#include "register_allocator_graph_color.h"
@@ -27,22 +29,24 @@
namespace art {
-RegisterAllocator::RegisterAllocator(ArenaAllocator* allocator,
+RegisterAllocator::RegisterAllocator(ScopedArenaAllocator* allocator,
CodeGenerator* codegen,
const SsaLivenessAnalysis& liveness)
: allocator_(allocator),
codegen_(codegen),
liveness_(liveness) {}
-RegisterAllocator* RegisterAllocator::Create(ArenaAllocator* allocator,
- CodeGenerator* codegen,
- const SsaLivenessAnalysis& analysis,
- Strategy strategy) {
+std::unique_ptr<RegisterAllocator> RegisterAllocator::Create(ScopedArenaAllocator* allocator,
+ CodeGenerator* codegen,
+ const SsaLivenessAnalysis& analysis,
+ Strategy strategy) {
switch (strategy) {
case kRegisterAllocatorLinearScan:
- return new (allocator) RegisterAllocatorLinearScan(allocator, codegen, analysis);
+ return std::unique_ptr<RegisterAllocator>(
+ new (allocator) RegisterAllocatorLinearScan(allocator, codegen, analysis));
case kRegisterAllocatorGraphColor:
- return new (allocator) RegisterAllocatorGraphColor(allocator, codegen, analysis);
+ return std::unique_ptr<RegisterAllocator>(
+ new (allocator) RegisterAllocatorGraphColor(allocator, codegen, analysis));
default:
LOG(FATAL) << "Invalid register allocation strategy: " << strategy;
UNREACHABLE();
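// [Editor's note] The base class becomes a DeletableArenaObject below, so the
// unique_ptr returned here runs the destructor while the storage itself still
// belongs to the arena. Sketch of the intended call site:
//
//   std::unique_ptr<RegisterAllocator> ra =
//       RegisterAllocator::Create(&local_allocator, codegen, analysis);
//   ra->AllocateRegisters();
//   // ~unique_ptr runs ~RegisterAllocator() (non-arena resources); the bytes are
//   // reclaimed later, when the scoped arena unwinds.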
@@ -87,18 +91,18 @@
DISALLOW_COPY_AND_ASSIGN(AllRangesIterator);
};
-bool RegisterAllocator::ValidateIntervals(const ArenaVector<LiveInterval*>& intervals,
+bool RegisterAllocator::ValidateIntervals(ArrayRef<LiveInterval* const> intervals,
size_t number_of_spill_slots,
size_t number_of_out_slots,
const CodeGenerator& codegen,
- ArenaAllocator* allocator,
bool processing_core_registers,
bool log_fatal_on_failure) {
size_t number_of_registers = processing_core_registers
? codegen.GetNumberOfCoreRegisters()
: codegen.GetNumberOfFloatingPointRegisters();
- ArenaVector<ArenaBitVector*> liveness_of_values(
- allocator->Adapter(kArenaAllocRegisterAllocatorValidate));
+ ScopedArenaAllocator allocator(codegen.GetGraph()->GetArenaStack());
+ ScopedArenaVector<ArenaBitVector*> liveness_of_values(
+ allocator.Adapter(kArenaAllocRegisterAllocatorValidate));
liveness_of_values.reserve(number_of_registers + number_of_spill_slots);
size_t max_end = 0u;
@@ -112,7 +116,8 @@
// allocated will populate the associated bit vector based on its live ranges.
for (size_t i = 0; i < number_of_registers + number_of_spill_slots; ++i) {
liveness_of_values.push_back(
- ArenaBitVector::Create(allocator, max_end, false, kArenaAllocRegisterAllocatorValidate));
+ ArenaBitVector::Create(&allocator, max_end, false, kArenaAllocRegisterAllocatorValidate));
+ liveness_of_values.back()->ClearAllBits();
}
for (LiveInterval* start_interval : intervals) {
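// [Editor's note, assumption] The ClearAllBits() call added above compensates for
// the allocator switch: the long-lived ArenaAllocator hands out zero-initialized
// memory, while ScopedArenaAllocator does not guarantee that, so each validation
// bit vector must be cleared explicitly before live ranges set bits in it.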
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index 4375d68..eaeec3b 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -18,7 +18,7 @@
#define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_H_
#include "arch/instruction_set.h"
-#include "base/arena_containers.h"
+#include "base/array_ref.h"
#include "base/arena_object.h"
#include "base/macros.h"
@@ -36,7 +36,7 @@
/**
* Base class for any register allocator.
*/
-class RegisterAllocator : public ArenaObject<kArenaAllocRegisterAllocator> {
+class RegisterAllocator : public DeletableArenaObject<kArenaAllocRegisterAllocator> {
public:
enum Strategy {
kRegisterAllocatorLinearScan,
@@ -45,10 +45,10 @@
static constexpr Strategy kRegisterAllocatorDefault = kRegisterAllocatorLinearScan;
- static RegisterAllocator* Create(ArenaAllocator* allocator,
- CodeGenerator* codegen,
- const SsaLivenessAnalysis& analysis,
- Strategy strategy = kRegisterAllocatorDefault);
+ static std::unique_ptr<RegisterAllocator> Create(ScopedArenaAllocator* allocator,
+ CodeGenerator* codegen,
+ const SsaLivenessAnalysis& analysis,
+ Strategy strategy = kRegisterAllocatorDefault);
virtual ~RegisterAllocator() = default;
@@ -64,18 +64,17 @@
InstructionSet instruction_set);
// Verifies that live intervals do not conflict. Used by unit testing.
- static bool ValidateIntervals(const ArenaVector<LiveInterval*>& intervals,
+ static bool ValidateIntervals(ArrayRef<LiveInterval* const> intervals,
size_t number_of_spill_slots,
size_t number_of_out_slots,
const CodeGenerator& codegen,
- ArenaAllocator* allocator,
bool processing_core_registers,
bool log_fatal_on_failure);
static constexpr const char* kRegisterAllocatorPassName = "register";
protected:
- RegisterAllocator(ArenaAllocator* allocator,
+ RegisterAllocator(ScopedArenaAllocator* allocator,
CodeGenerator* codegen,
const SsaLivenessAnalysis& analysis);
@@ -88,7 +87,7 @@
// to find an optimal split position.
LiveInterval* SplitBetween(LiveInterval* interval, size_t from, size_t to);
- ArenaAllocator* const allocator_;
+ ScopedArenaAllocator* allocator_;
CodeGenerator* const codegen_;
const SsaLivenessAnalysis& liveness_;
};
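// [Editor's note, assumption] allocator_ deliberately loses its `const` qualifier
// in the change above: the graph-coloring strategy retries allocation with a fresh
// ScopedArenaAllocator per coloring attempt, so subclasses need to repoint the
// member between attempts, which a `* const` member would forbid.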
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
index 33df607..ad5248e 100644
--- a/compiler/optimizing/register_allocator_graph_color.cc
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -217,13 +217,12 @@
// and thus whether it is safe to prune it from the interference graph early on.
class InterferenceNode : public ArenaObject<kArenaAllocRegisterAllocator> {
public:
- InterferenceNode(ArenaAllocator* allocator,
- LiveInterval* interval,
+ InterferenceNode(LiveInterval* interval,
const SsaLivenessAnalysis& liveness)
: stage(NodeStage::kInitial),
interval_(interval),
- adjacent_nodes_(allocator->Adapter(kArenaAllocRegisterAllocator)),
- coalesce_opportunities_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+ adjacent_nodes_(nullptr),
+ coalesce_opportunities_(nullptr),
out_degree_(interval->HasRegister() ? std::numeric_limits<size_t>::max() : 0),
alias_(this),
spill_weight_(ComputeSpillWeight(interval, liveness)),
@@ -232,21 +231,26 @@
DCHECK(!interval->IsHighInterval()) << "Pair nodes should be represented by the low interval";
}
- void AddInterference(InterferenceNode* other, bool guaranteed_not_interfering_yet) {
+ void AddInterference(InterferenceNode* other,
+ bool guaranteed_not_interfering_yet,
+ ScopedArenaDeque<ScopedArenaVector<InterferenceNode*>>* storage) {
DCHECK(!IsPrecolored()) << "To save memory, fixed nodes should not have outgoing interferences";
DCHECK_NE(this, other) << "Should not create self loops in the interference graph";
DCHECK_EQ(this, alias_) << "Should not add interferences to a node that aliases another";
DCHECK_NE(stage, NodeStage::kPruned);
DCHECK_NE(other->stage, NodeStage::kPruned);
+ if (adjacent_nodes_ == nullptr) {
+ ScopedArenaVector<InterferenceNode*>::allocator_type adapter(storage->get_allocator());
+ storage->emplace_back(adapter);
+ adjacent_nodes_ = &storage->back();
+ }
if (guaranteed_not_interfering_yet) {
- DCHECK(std::find(adjacent_nodes_.begin(), adjacent_nodes_.end(), other)
- == adjacent_nodes_.end());
- adjacent_nodes_.push_back(other);
+ DCHECK(!ContainsElement(GetAdjacentNodes(), other));
+ adjacent_nodes_->push_back(other);
out_degree_ += EdgeWeightWith(other);
} else {
- auto it = std::find(adjacent_nodes_.begin(), adjacent_nodes_.end(), other);
- if (it == adjacent_nodes_.end()) {
- adjacent_nodes_.push_back(other);
+ if (!ContainsElement(GetAdjacentNodes(), other)) {
+ adjacent_nodes_->push_back(other);
out_degree_ += EdgeWeightWith(other);
}
}
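// [Editor's note] A generic sketch of the lazy, deque-backed adjacency storage
// used by AddInterference above (stand-in names, not ART types). The deque owns
// the vectors; nodes keep raw pointers into it, which stay valid because a deque
// never relocates existing elements when growing at either end.
//
//   #include <deque>
//   #include <vector>
//
//   struct NodeSketch {
//     std::vector<NodeSketch*>* adjacent = nullptr;  // null until the first edge
//
//     void AddEdge(NodeSketch* other,
//                  std::deque<std::vector<NodeSketch*>>* storage) {
//       if (adjacent == nullptr) {
//         storage->emplace_back();       // address is stable for the deque's life
//         adjacent = &storage->back();
//       }
//       adjacent->push_back(other);
//     }
//   };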
@@ -255,26 +259,29 @@
void RemoveInterference(InterferenceNode* other) {
DCHECK_EQ(this, alias_) << "Should not remove interferences from a coalesced node";
DCHECK_EQ(other->stage, NodeStage::kPruned) << "Should only remove interferences when pruning";
- auto it = std::find(adjacent_nodes_.begin(), adjacent_nodes_.end(), other);
- if (it != adjacent_nodes_.end()) {
- adjacent_nodes_.erase(it);
- out_degree_ -= EdgeWeightWith(other);
+ if (adjacent_nodes_ != nullptr) {
+ auto it = std::find(adjacent_nodes_->begin(), adjacent_nodes_->end(), other);
+ if (it != adjacent_nodes_->end()) {
+ adjacent_nodes_->erase(it);
+ out_degree_ -= EdgeWeightWith(other);
+ }
}
}
bool ContainsInterference(InterferenceNode* other) const {
DCHECK(!IsPrecolored()) << "Should not query fixed nodes for interferences";
DCHECK_EQ(this, alias_) << "Should not query a coalesced node for interferences";
- auto it = std::find(adjacent_nodes_.begin(), adjacent_nodes_.end(), other);
- return it != adjacent_nodes_.end();
+ return ContainsElement(GetAdjacentNodes(), other);
}
LiveInterval* GetInterval() const {
return interval_;
}
- const ArenaVector<InterferenceNode*>& GetAdjacentNodes() const {
- return adjacent_nodes_;
+ ArrayRef<InterferenceNode*> GetAdjacentNodes() const {
+ return adjacent_nodes_ != nullptr
+ ? ArrayRef<InterferenceNode*>(*adjacent_nodes_)
+ : ArrayRef<InterferenceNode*>();
}
size_t GetOutDegree() const {
@@ -283,16 +290,22 @@
return out_degree_;
}
- void AddCoalesceOpportunity(CoalesceOpportunity* opportunity) {
- coalesce_opportunities_.push_back(opportunity);
+ void AddCoalesceOpportunity(CoalesceOpportunity* opportunity,
+ ScopedArenaDeque<ScopedArenaVector<CoalesceOpportunity*>>* storage) {
+ if (coalesce_opportunities_ == nullptr) {
+ ScopedArenaVector<CoalesceOpportunity*>::allocator_type adapter(storage->get_allocator());
+ storage->emplace_back(adapter);
+ coalesce_opportunities_ = &storage->back();
+ }
+ coalesce_opportunities_->push_back(opportunity);
}
void ClearCoalesceOpportunities() {
- coalesce_opportunities_.clear();
+ coalesce_opportunities_ = nullptr;
}
bool IsMoveRelated() const {
- for (CoalesceOpportunity* opportunity : coalesce_opportunities_) {
+ for (CoalesceOpportunity* opportunity : GetCoalesceOpportunities()) {
if (opportunity->stage == CoalesceStage::kWorklist ||
opportunity->stage == CoalesceStage::kActive) {
return true;
@@ -325,8 +338,10 @@
return alias_;
}
- const ArenaVector<CoalesceOpportunity*>& GetCoalesceOpportunities() const {
- return coalesce_opportunities_;
+ ArrayRef<CoalesceOpportunity*> GetCoalesceOpportunities() const {
+ return coalesce_opportunities_ != nullptr
+ ? ArrayRef<CoalesceOpportunity*>(*coalesce_opportunities_)
+ : ArrayRef<CoalesceOpportunity*>();
}
float GetSpillWeight() const {
@@ -361,10 +376,10 @@
// All nodes interfering with this one.
// We use an unsorted vector as a set, since a tree or hash set is too heavy for the
// set sizes that we encounter. Using a vector leads to much better performance.
- ArenaVector<InterferenceNode*> adjacent_nodes_;
+ ScopedArenaVector<InterferenceNode*>* adjacent_nodes_; // Owned by ColoringIteration.
// Interference nodes that this node should be coalesced with to reduce moves.
- ArenaVector<CoalesceOpportunity*> coalesce_opportunities_;
+ ScopedArenaVector<CoalesceOpportunity*>* coalesce_opportunities_; // Owned by ColoringIteration.
// The maximum number of colors with which this node could interfere. This could be more than
// the number of adjacent nodes if this is a pair node, or if some adjacent nodes are pair nodes.
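// [Editor's note] Expanding the comment above: the adjacency "set" stays an
// unsorted vector on purpose. Degrees are small, so ContainsElement()'s linear
// scan is cheaper in practice than tree or hash-set bookkeeping, inserts are
// amortized O(1), and iteration stays cache-friendly.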
@@ -416,7 +431,7 @@
class ColoringIteration {
public:
ColoringIteration(RegisterAllocatorGraphColor* register_allocator,
- ArenaAllocator* allocator,
+ ScopedArenaAllocator* allocator,
bool processing_core_regs,
size_t num_regs)
: register_allocator_(register_allocator),
@@ -430,15 +445,17 @@
freeze_worklist_(allocator->Adapter(kArenaAllocRegisterAllocator)),
spill_worklist_(HasGreaterNodePriority, allocator->Adapter(kArenaAllocRegisterAllocator)),
coalesce_worklist_(CoalesceOpportunity::CmpPriority,
- allocator->Adapter(kArenaAllocRegisterAllocator)) {}
+ allocator->Adapter(kArenaAllocRegisterAllocator)),
+ adjacent_nodes_links_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+ coalesce_opportunities_links_(allocator->Adapter(kArenaAllocRegisterAllocator)) {}
// Use the intervals collected from instructions to construct an
// interference graph mapping intervals to adjacency lists.
// Also, collect synthesized safepoint nodes, used to keep
// track of live intervals across safepoints.
// TODO: Should build safepoints elsewhere.
- void BuildInterferenceGraph(const ArenaVector<LiveInterval*>& intervals,
- const ArenaVector<InterferenceNode*>& physical_nodes);
+ void BuildInterferenceGraph(const ScopedArenaVector<LiveInterval*>& intervals,
+ const ScopedArenaVector<InterferenceNode*>& physical_nodes);
// Add coalesce opportunities to interference nodes.
void FindCoalesceOpportunities();
@@ -456,8 +473,8 @@
// Return prunable nodes.
// The register allocator will need to access prunable nodes after coloring
// in order to tell the code generator which registers have been assigned.
- const ArenaVector<InterferenceNode*>& GetPrunableNodes() const {
- return prunable_nodes_;
+ ArrayRef<InterferenceNode* const> GetPrunableNodes() const {
+ return ArrayRef<InterferenceNode* const>(prunable_nodes_);
}
private:
@@ -503,38 +520,46 @@
// needed to split intervals and assign spill slots.
RegisterAllocatorGraphColor* register_allocator_;
- // An arena allocator used for a single graph coloring attempt.
- ArenaAllocator* allocator_;
+ // A scoped arena allocator used for a single graph coloring attempt.
+ ScopedArenaAllocator* allocator_;
const bool processing_core_regs_;
const size_t num_regs_;
// A map from live intervals to interference nodes.
- ArenaHashMap<LiveInterval*, InterferenceNode*> interval_node_map_;
+ ScopedArenaHashMap<LiveInterval*, InterferenceNode*> interval_node_map_;
// Uncolored nodes that should be pruned from the interference graph.
- ArenaVector<InterferenceNode*> prunable_nodes_;
+ ScopedArenaVector<InterferenceNode*> prunable_nodes_;
// A stack of nodes pruned from the interference graph, waiting to be colored.
- ArenaStdStack<InterferenceNode*> pruned_nodes_;
+ ScopedArenaStdStack<InterferenceNode*> pruned_nodes_;
// A queue containing low degree, non-move-related nodes that can be pruned immediately.
- ArenaDeque<InterferenceNode*> simplify_worklist_;
+ ScopedArenaDeque<InterferenceNode*> simplify_worklist_;
// A queue containing low degree, move-related nodes.
- ArenaDeque<InterferenceNode*> freeze_worklist_;
+ ScopedArenaDeque<InterferenceNode*> freeze_worklist_;
// A queue containing high degree nodes.
// If we have to prune from the spill worklist, we cannot guarantee
// the pruned node a color, so we order the worklist by priority.
- ArenaPriorityQueue<InterferenceNode*, decltype(&HasGreaterNodePriority)> spill_worklist_;
+ ScopedArenaPriorityQueue<InterferenceNode*, decltype(&HasGreaterNodePriority)> spill_worklist_;
// A queue containing coalesce opportunities.
// We order the coalesce worklist by priority, since some coalesce opportunities (e.g., those
// inside of loops) are more important than others.
- ArenaPriorityQueue<CoalesceOpportunity*,
- decltype(&CoalesceOpportunity::CmpPriority)> coalesce_worklist_;
+ ScopedArenaPriorityQueue<CoalesceOpportunity*,
+ decltype(&CoalesceOpportunity::CmpPriority)> coalesce_worklist_;
+
+ // Storage for links to adjacent nodes for interference nodes.
+ // Using std::deque so that elements do not move when adding new ones.
+ ScopedArenaDeque<ScopedArenaVector<InterferenceNode*>> adjacent_nodes_links_;
+
+ // Storage for links to coalesce opportunities for interference nodes.
+ // Using std::deque so that elements do not move when adding new ones.
+ ScopedArenaDeque<ScopedArenaVector<CoalesceOpportunity*>> coalesce_opportunities_links_;
DISALLOW_COPY_AND_ASSIGN(ColoringIteration);
};
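
The two new _links_ members exist because each InterferenceNode now keeps only a pointer into this storage (see the coalesce_opportunities_ comment above). std::deque is the right container for that: push_back on a deque never moves existing elements, whereas a growing std::vector may reallocate and invalidate every outstanding pointer. A standalone illustration with plain standard containers:

    #include <cassert>
    #include <deque>
    #include <vector>

    int main() {
      std::deque<std::vector<int>> links;
      links.emplace_back(std::vector<int>{1, 2, 3});
      std::vector<int>* held = &links.back();  // What a node would store.
      for (int i = 0; i < 1000; ++i) {
        links.emplace_back();                  // Deque growth leaves old elements in place.
      }
      assert(held == &links.front());          // The stored pointer is still valid.
      assert((*held)[0] == 1);
      // With std::vector<std::vector<int>> as the outer container, the same
      // pattern would be undefined behavior once a reallocation happened.
    }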
@@ -547,7 +572,7 @@
return static_cast<size_t>(InstructionSetPointerSize(codegen.GetInstructionSet())) / kVRegSize;
}
-RegisterAllocatorGraphColor::RegisterAllocatorGraphColor(ArenaAllocator* allocator,
+RegisterAllocatorGraphColor::RegisterAllocatorGraphColor(ScopedArenaAllocator* allocator,
CodeGenerator* codegen,
const SsaLivenessAnalysis& liveness,
bool iterative_move_coalescing)
@@ -574,8 +599,7 @@
physical_core_nodes_.resize(codegen_->GetNumberOfCoreRegisters(), nullptr);
for (size_t i = 0; i < codegen_->GetNumberOfCoreRegisters(); ++i) {
LiveInterval* interval = LiveInterval::MakeFixedInterval(allocator_, i, DataType::Type::kInt32);
- physical_core_nodes_[i] =
- new (allocator_) InterferenceNode(allocator_, interval, liveness);
+ physical_core_nodes_[i] = new (allocator_) InterferenceNode(interval, liveness);
physical_core_nodes_[i]->stage = NodeStage::kPrecolored;
core_intervals_.push_back(interval);
if (codegen_->IsBlockedCoreRegister(i)) {
@@ -587,8 +611,7 @@
for (size_t i = 0; i < codegen_->GetNumberOfFloatingPointRegisters(); ++i) {
LiveInterval* interval =
LiveInterval::MakeFixedInterval(allocator_, i, DataType::Type::kFloat32);
- physical_fp_nodes_[i] =
- new (allocator_) InterferenceNode(allocator_, interval, liveness);
+ physical_fp_nodes_[i] = new (allocator_) InterferenceNode(interval, liveness);
physical_fp_nodes_[i]->stage = NodeStage::kPrecolored;
fp_intervals_.push_back(interval);
if (codegen_->IsBlockedFloatingPointRegister(i)) {
@@ -597,12 +620,14 @@
}
}
+RegisterAllocatorGraphColor::~RegisterAllocatorGraphColor() {}
+
void RegisterAllocatorGraphColor::AllocateRegisters() {
// (1) Collect and prepare live intervals.
ProcessInstructions();
for (bool processing_core_regs : {true, false}) {
- ArenaVector<LiveInterval*>& intervals = processing_core_regs
+ ScopedArenaVector<LiveInterval*>& intervals = processing_core_regs
? core_intervals_
: fp_intervals_;
size_t num_registers = processing_core_regs
@@ -619,17 +644,15 @@
<< "should be prioritized over long ones, because they cannot be split further.)";
// Many data structures are cleared between graph coloring attempts, so we reduce
- // total memory usage by using a new arena allocator for each attempt.
- ArenaAllocator coloring_attempt_allocator(allocator_->GetArenaPool());
+ // total memory usage by using a new scoped arena allocator for each attempt.
+ ScopedArenaAllocator coloring_attempt_allocator(allocator_->GetArenaStack());
ColoringIteration iteration(this,
&coloring_attempt_allocator,
processing_core_regs,
num_registers);
- // (2) Build the interference graph. Also gather safepoints.
- ArenaVector<InterferenceNode*> safepoints(
- coloring_attempt_allocator.Adapter(kArenaAllocRegisterAllocator));
- ArenaVector<InterferenceNode*>& physical_nodes = processing_core_regs
+ // (2) Build the interference graph.
+ ScopedArenaVector<InterferenceNode*>& physical_nodes = processing_core_regs
? physical_core_nodes_
: physical_fp_nodes_;
iteration.BuildInterferenceGraph(intervals, physical_nodes);
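
The per-attempt ScopedArenaAllocator above is where the memory saving comes from: each coloring attempt records the current top of the shared arena stack and rewinds to it when the attempt's allocator goes out of scope, so successive attempts recycle the same memory instead of growing a pool. A toy model of that discipline (not ART's real ArenaStack/ScopedArenaAllocator classes):

    #include <cstddef>
    #include <vector>

    // Shared bump-pointer stack; no bounds checking, sketch only.
    class ArenaStack {
     public:
      explicit ArenaStack(size_t capacity) : storage_(capacity), top_(0) {}
      void* Alloc(size_t bytes) {
        void* result = &storage_[top_];
        top_ += (bytes + 7) & ~size_t{7};  // Keep 8-byte alignment.
        return result;
      }
      size_t Top() const { return top_; }
      void Rewind(size_t top) { top_ = top; }
     private:
      std::vector<char> storage_;
      size_t top_;
    };

    // Records the stack top on entry, rewinds on exit: bulk free in O(1).
    class ScopedArena {
     public:
      explicit ScopedArena(ArenaStack* stack) : stack_(stack), saved_top_(stack->Top()) {}
      ~ScopedArena() { stack_->Rewind(saved_top_); }
      void* Alloc(size_t bytes) { return stack_->Alloc(bytes); }
     private:
      ArenaStack* stack_;
      size_t saved_top_;
    };

    int main() {
      ArenaStack stack(1 << 20);
      for (int attempt = 0; attempt < 3; ++attempt) {
        ScopedArena arena(&stack);  // Like coloring_attempt_allocator above.
        arena.Alloc(4096);          // Worklists, interference nodes, ...
      }                             // Every attempt reused the same bytes.
      return stack.Top() == 0 ? 0 : 1;
    }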
@@ -691,7 +714,7 @@
} // for processing_core_instructions
// (6) Resolve locations and deconstruct SSA form.
- RegisterAllocationResolver(allocator_, codegen_, liveness_)
+ RegisterAllocationResolver(codegen_, liveness_)
.Resolve(ArrayRef<HInstruction* const>(safepoints_),
reserved_art_method_slots_ + reserved_out_slots_,
num_int_spill_slots_,
@@ -699,7 +722,7 @@
num_float_spill_slots_,
num_double_spill_slots_,
catch_phi_spill_slot_counter_,
- temp_intervals_);
+ ArrayRef<LiveInterval* const>(temp_intervals_));
if (kIsDebugBuild) {
Validate(/*log_fatal_on_failure*/ true);
@@ -708,8 +731,9 @@
bool RegisterAllocatorGraphColor::Validate(bool log_fatal_on_failure) {
for (bool processing_core_regs : {true, false}) {
- ArenaVector<LiveInterval*> intervals(
- allocator_->Adapter(kArenaAllocRegisterAllocatorValidate));
+ ScopedArenaAllocator allocator(allocator_->GetArenaStack());
+ ScopedArenaVector<LiveInterval*> intervals(
+ allocator.Adapter(kArenaAllocRegisterAllocatorValidate));
for (size_t i = 0; i < liveness_.GetNumberOfSsaValues(); ++i) {
HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
LiveInterval* interval = instruction->GetLiveInterval();
@@ -718,7 +742,7 @@
}
}
- ArenaVector<InterferenceNode*>& physical_nodes = processing_core_regs
+ ScopedArenaVector<InterferenceNode*>& physical_nodes = processing_core_regs
? physical_core_nodes_
: physical_fp_nodes_;
for (InterferenceNode* fixed : physical_nodes) {
@@ -742,11 +766,10 @@
+ num_float_spill_slots_
+ num_double_spill_slots_
+ catch_phi_spill_slot_counter_;
- bool ok = ValidateIntervals(intervals,
+ bool ok = ValidateIntervals(ArrayRef<LiveInterval* const>(intervals),
spill_slots,
reserved_art_method_slots_ + reserved_out_slots_,
*codegen_,
- allocator_,
processing_core_regs,
log_fatal_on_failure);
if (!ok) {
@@ -825,7 +848,7 @@
CheckForFixedOutput(instruction);
AllocateSpillSlotForCatchPhi(instruction);
- ArenaVector<LiveInterval*>& intervals = IsCoreInterval(interval)
+ ScopedArenaVector<LiveInterval*>& intervals = IsCoreInterval(interval)
? core_intervals_
: fp_intervals_;
if (interval->HasSpillSlot() || instruction->IsConstant()) {
@@ -1075,11 +1098,12 @@
} else if (to->IsPrecolored()) {
// It is important that only a single node represents a given fixed register in the
// interference graph. We retrieve that node here.
- const ArenaVector<InterferenceNode*>& physical_nodes = to->GetInterval()->IsFloatingPoint()
- ? register_allocator_->physical_fp_nodes_
- : register_allocator_->physical_core_nodes_;
+ const ScopedArenaVector<InterferenceNode*>& physical_nodes =
+ to->GetInterval()->IsFloatingPoint() ? register_allocator_->physical_fp_nodes_
+ : register_allocator_->physical_core_nodes_;
InterferenceNode* physical_node = physical_nodes[to->GetInterval()->GetRegister()];
- from->AddInterference(physical_node, /*guaranteed_not_interfering_yet*/ false);
+ from->AddInterference(
+ physical_node, /*guaranteed_not_interfering_yet*/ false, &adjacent_nodes_links_);
DCHECK_EQ(to->GetInterval()->GetRegister(), physical_node->GetInterval()->GetRegister());
DCHECK_EQ(to->GetAlias(), physical_node) << "Fixed nodes should alias the canonical fixed node";
@@ -1097,11 +1121,12 @@
physical_nodes[to->GetInterval()->GetHighInterval()->GetRegister()];
DCHECK_EQ(to->GetInterval()->GetHighInterval()->GetRegister(),
high_node->GetInterval()->GetRegister());
- from->AddInterference(high_node, /*guaranteed_not_interfering_yet*/ false);
+ from->AddInterference(
+ high_node, /*guaranteed_not_interfering_yet*/ false, &adjacent_nodes_links_);
}
} else {
// Standard interference between two uncolored nodes.
- from->AddInterference(to, guaranteed_not_interfering_yet);
+ from->AddInterference(to, guaranteed_not_interfering_yet, &adjacent_nodes_links_);
}
if (both_directions) {
@@ -1156,8 +1181,8 @@
}
void ColoringIteration::BuildInterferenceGraph(
- const ArenaVector<LiveInterval*>& intervals,
- const ArenaVector<InterferenceNode*>& physical_nodes) {
+ const ScopedArenaVector<LiveInterval*>& intervals,
+ const ScopedArenaVector<InterferenceNode*>& physical_nodes) {
DCHECK(interval_node_map_.Empty() && prunable_nodes_.empty());
// Build the interference graph efficiently by ordering range endpoints
// by position and doing a linear sweep to find interferences. (That is, we
@@ -1171,7 +1196,7 @@
//
// For simplicity, we create a tuple for each endpoint, and then sort the tuples.
// Tuple contents: (position, is_range_beginning, node).
- ArenaVector<std::tuple<size_t, bool, InterferenceNode*>> range_endpoints(
+ ScopedArenaVector<std::tuple<size_t, bool, InterferenceNode*>> range_endpoints(
allocator_->Adapter(kArenaAllocRegisterAllocator));
// We reserve plenty of space to avoid excessive copying.
@@ -1181,8 +1206,8 @@
for (LiveInterval* sibling = parent; sibling != nullptr; sibling = sibling->GetNextSibling()) {
LiveRange* range = sibling->GetFirstRange();
if (range != nullptr) {
- InterferenceNode* node = new (allocator_) InterferenceNode(
- allocator_, sibling, register_allocator_->liveness_);
+ InterferenceNode* node =
+ new (allocator_) InterferenceNode(sibling, register_allocator_->liveness_);
interval_node_map_.Insert(std::make_pair(sibling, node));
if (sibling->HasRegister()) {
@@ -1217,8 +1242,7 @@
});
// Nodes live at the current position in the linear sweep.
- ArenaVector<InterferenceNode*> live(
- allocator_->Adapter(kArenaAllocRegisterAllocator));
+ ScopedArenaVector<InterferenceNode*> live(allocator_->Adapter(kArenaAllocRegisterAllocator));
// Linear sweep. When we encounter the beginning of a range, we add the corresponding node to the
// live set. When we encounter the end of a range, we remove the corresponding node
@@ -1261,8 +1285,8 @@
<< "Nodes of different memory widths should never be coalesced";
CoalesceOpportunity* opportunity =
new (allocator_) CoalesceOpportunity(a, b, kind, position, register_allocator_->liveness_);
- a->AddCoalesceOpportunity(opportunity);
- b->AddCoalesceOpportunity(opportunity);
+ a->AddCoalesceOpportunity(opportunity, &coalesce_opportunities_links_);
+ b->AddCoalesceOpportunity(opportunity, &coalesce_opportunities_links_);
coalesce_worklist_.push(opportunity);
}
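
The coalesce worklist declared earlier is a priority queue over a function-pointer comparator (decltype(&CoalesceOpportunity::CmpPriority)), so opportunities inside loops surface before straight-line ones. The same std::priority_queue pattern in isolation, with made-up priorities:

    #include <iostream>
    #include <queue>
    #include <vector>

    struct Opportunity {
      const char* label;
      size_t priority;  // Higher = more important, e.g. deeper loop nesting.
    };

    // A less-than comparator makes priority_queue a max-heap on priority.
    static bool CmpPriority(const Opportunity* a, const Opportunity* b) {
      return a->priority < b->priority;
    }

    int main() {
      std::priority_queue<Opportunity*, std::vector<Opportunity*>, decltype(&CmpPriority)>
          worklist(CmpPriority);
      Opportunity in_loop{"inside loop", 10};
      Opportunity straight{"straight-line", 1};
      worklist.push(&straight);
      worklist.push(&in_loop);
      while (!worklist.empty()) {
        std::cout << worklist.top()->label << '\n';  // "inside loop" pops first.
        worklist.pop();
      }
    }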
@@ -1332,7 +1356,7 @@
// Coalesce phi inputs with the corresponding output.
HInstruction* defined_by = interval->GetDefinedBy();
if (defined_by != nullptr && defined_by->IsPhi()) {
- const ArenaVector<HBasicBlock*>& predecessors = defined_by->GetBlock()->GetPredecessors();
+ ArrayRef<HBasicBlock* const> predecessors(defined_by->GetBlock()->GetPredecessors());
HInputsRef inputs = defined_by->GetInputs();
for (size_t i = 0, e = inputs.size(); i < e; ++i) {
@@ -1675,7 +1699,7 @@
// Add coalesce opportunities.
for (CoalesceOpportunity* opportunity : from->GetCoalesceOpportunities()) {
if (opportunity->stage != CoalesceStage::kDefunct) {
- into->AddCoalesceOpportunity(opportunity);
+ into->AddCoalesceOpportunity(opportunity, &coalesce_opportunities_links_);
}
}
EnableCoalesceOpportunities(from);
@@ -1729,7 +1753,7 @@
// Build a mask with a bit set for each register assigned to some
// interval in `intervals`.
template <typename Container>
-static std::bitset<kMaxNumRegs> BuildConflictMask(Container& intervals) {
+static std::bitset<kMaxNumRegs> BuildConflictMask(const Container& intervals) {
std::bitset<kMaxNumRegs> conflict_mask;
for (InterferenceNode* adjacent : intervals) {
LiveInterval* conflicting = adjacent->GetInterval();
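
BuildConflictMask folds the registers already taken by a node's neighbors into a single std::bitset; choosing a color is then just a scan for the first clear bit. A compact standalone version of both halves (register numbers are invented for the example):

    #include <bitset>
    #include <cstdio>
    #include <vector>

    constexpr size_t kMaxNumRegs = 32;  // Stands in for the bound DCHECKed below.

    // -1 models a neighbor with no register assigned yet.
    static std::bitset<kMaxNumRegs> BuildConflictMask(const std::vector<int>& neighbor_regs) {
      std::bitset<kMaxNumRegs> conflict_mask;
      for (int reg : neighbor_regs) {
        if (reg >= 0) {
          conflict_mask.set(reg);
        }
      }
      return conflict_mask;
    }

    int main() {
      std::bitset<kMaxNumRegs> conflicts = BuildConflictMask({0, 1, 3, -1});
      for (size_t reg = 0; reg < kMaxNumRegs; ++reg) {
        if (!conflicts.test(reg)) {           // First clear bit is a usable color.
          std::printf("assign r%zu\n", reg);  // Prints "assign r2".
          break;
        }
      }
    }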
@@ -1765,7 +1789,7 @@
bool ColoringIteration::ColorInterferenceGraph() {
DCHECK_LE(num_regs_, kMaxNumRegs) << "kMaxNumRegs is too small";
- ArenaVector<LiveInterval*> colored_intervals(
+ ScopedArenaVector<LiveInterval*> colored_intervals(
allocator_->Adapter(kArenaAllocRegisterAllocator));
bool successful = true;
@@ -1888,16 +1912,18 @@
return successful;
}
-void RegisterAllocatorGraphColor::AllocateSpillSlots(const ArenaVector<InterferenceNode*>& nodes) {
+void RegisterAllocatorGraphColor::AllocateSpillSlots(ArrayRef<InterferenceNode* const> nodes) {
// The register allocation resolver will organize the stack based on value type,
// so we assign stack slots for each value type separately.
- ArenaVector<LiveInterval*> double_intervals(allocator_->Adapter(kArenaAllocRegisterAllocator));
- ArenaVector<LiveInterval*> long_intervals(allocator_->Adapter(kArenaAllocRegisterAllocator));
- ArenaVector<LiveInterval*> float_intervals(allocator_->Adapter(kArenaAllocRegisterAllocator));
- ArenaVector<LiveInterval*> int_intervals(allocator_->Adapter(kArenaAllocRegisterAllocator));
+ ScopedArenaAllocator allocator(allocator_->GetArenaStack());
+ ScopedArenaAllocatorAdapter<void> adapter = allocator.Adapter(kArenaAllocRegisterAllocator);
+ ScopedArenaVector<LiveInterval*> double_intervals(adapter);
+ ScopedArenaVector<LiveInterval*> long_intervals(adapter);
+ ScopedArenaVector<LiveInterval*> float_intervals(adapter);
+ ScopedArenaVector<LiveInterval*> int_intervals(adapter);
// The set of parent intervals already handled.
- ArenaSet<LiveInterval*> seen(allocator_->Adapter(kArenaAllocRegisterAllocator));
+ ScopedArenaSet<LiveInterval*> seen(adapter);
// Find nodes that need spill slots.
for (InterferenceNode* node : nodes) {
@@ -1954,23 +1980,24 @@
}
// Color spill slots for each value type.
- ColorSpillSlots(&double_intervals, &num_double_spill_slots_);
- ColorSpillSlots(&long_intervals, &num_long_spill_slots_);
- ColorSpillSlots(&float_intervals, &num_float_spill_slots_);
- ColorSpillSlots(&int_intervals, &num_int_spill_slots_);
+ ColorSpillSlots(ArrayRef<LiveInterval* const>(double_intervals), &num_double_spill_slots_);
+ ColorSpillSlots(ArrayRef<LiveInterval* const>(long_intervals), &num_long_spill_slots_);
+ ColorSpillSlots(ArrayRef<LiveInterval* const>(float_intervals), &num_float_spill_slots_);
+ ColorSpillSlots(ArrayRef<LiveInterval* const>(int_intervals), &num_int_spill_slots_);
}
-void RegisterAllocatorGraphColor::ColorSpillSlots(ArenaVector<LiveInterval*>* intervals,
- size_t* num_stack_slots_used) {
+void RegisterAllocatorGraphColor::ColorSpillSlots(ArrayRef<LiveInterval* const> intervals,
+ /* out */ size_t* num_stack_slots_used) {
// We cannot use the original interference graph here because spill slots are assigned to
// all of the siblings of an interval, whereas an interference node represents only a single
// sibling. So, we assign spill slots linear-scan-style by sorting all the interval endpoints
// by position, and assigning the lowest spill slot available when we encounter an interval
// beginning. We ignore lifetime holes for simplicity.
- ArenaVector<std::tuple<size_t, bool, LiveInterval*>> interval_endpoints(
- allocator_->Adapter(kArenaAllocRegisterAllocator));
+ ScopedArenaAllocator allocator(allocator_->GetArenaStack());
+ ScopedArenaVector<std::tuple<size_t, bool, LiveInterval*>> interval_endpoints(
+ allocator.Adapter(kArenaAllocRegisterAllocator));
- for (LiveInterval* parent_interval : *intervals) {
+ for (LiveInterval* parent_interval : intervals) {
DCHECK(parent_interval->IsParent());
DCHECK(!parent_interval->HasSpillSlot());
size_t start = parent_interval->GetStart();
@@ -1990,7 +2017,7 @@
< std::tie(std::get<0>(rhs), std::get<1>(rhs));
});
- ArenaBitVector taken(allocator_, 0, true);
+ ArenaBitVector taken(&allocator, 0, true, kArenaAllocRegisterAllocator);
for (auto it = interval_endpoints.begin(), end = interval_endpoints.end(); it != end; ++it) {
// Extract information from the current tuple.
LiveInterval* parent_interval;
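
The sweep that follows this hunk pairs each interval's start and end into (position, is_start) tuples, sorts them with exactly the std::tie comparison shown above, and walks them once, granting each starting interval the lowest free slot and releasing slots as ranges end; the rebuilt ArenaBitVector `taken` tracks occupancy. A self-contained sketch of that algorithm with standard containers (a simplification: real intervals have siblings and lifetime holes):

    #include <algorithm>
    #include <cstdio>
    #include <set>
    #include <tuple>
    #include <vector>

    struct Interval { size_t start, end, slot; };

    // Assign each half-open [start, end) interval the lowest slot free over its
    // whole range. Ends sort before starts at equal positions (false < true),
    // so a slot released at position p is reusable by an interval starting at p.
    static size_t ColorSpillSlots(std::vector<Interval>* intervals) {
      std::vector<std::tuple<size_t, bool, Interval*>> endpoints;  // (pos, is_start, interval)
      for (Interval& i : *intervals) {
        endpoints.emplace_back(i.start, true, &i);
        endpoints.emplace_back(i.end, false, &i);
      }
      std::sort(endpoints.begin(), endpoints.end(),
                [](const auto& lhs, const auto& rhs) {
                  return std::tie(std::get<0>(lhs), std::get<1>(lhs))
                      < std::tie(std::get<0>(rhs), std::get<1>(rhs));
                });
      std::set<size_t> free_slots;
      size_t num_slots = 0;
      for (const auto& [pos, is_start, interval] : endpoints) {
        if (is_start) {
          if (free_slots.empty()) {
            interval->slot = num_slots++;          // Open a fresh slot.
          } else {
            interval->slot = *free_slots.begin();  // Reuse the lowest free slot.
            free_slots.erase(free_slots.begin());
          }
        } else {
          free_slots.insert(interval->slot);       // Range over; release its slot.
        }
      }
      return num_slots;
    }

    int main() {
      std::vector<Interval> intervals = {{0, 10, 0}, {5, 15, 0}, {10, 20, 0}};
      std::printf("%zu slots\n", ColorSpillSlots(&intervals));  // 2: the third reuses slot 0.
    }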
diff --git a/compiler/optimizing/register_allocator_graph_color.h b/compiler/optimizing/register_allocator_graph_color.h
index 3f6d674..3072c92 100644
--- a/compiler/optimizing/register_allocator_graph_color.h
+++ b/compiler/optimizing/register_allocator_graph_color.h
@@ -18,9 +18,10 @@
#define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_GRAPH_COLOR_H_
#include "arch/instruction_set.h"
-#include "base/arena_containers.h"
#include "base/arena_object.h"
+#include "base/array_ref.h"
#include "base/macros.h"
+#include "base/scoped_arena_containers.h"
#include "register_allocator.h"
namespace art {
@@ -85,11 +86,11 @@
*/
class RegisterAllocatorGraphColor : public RegisterAllocator {
public:
- RegisterAllocatorGraphColor(ArenaAllocator* allocator,
+ RegisterAllocatorGraphColor(ScopedArenaAllocator* allocator,
CodeGenerator* codegen,
const SsaLivenessAnalysis& analysis,
bool iterative_move_coalescing = true);
- ~RegisterAllocatorGraphColor() OVERRIDE {}
+ ~RegisterAllocatorGraphColor() OVERRIDE;
void AllocateRegisters() OVERRIDE;
@@ -141,11 +142,10 @@
// Assigns stack slots to a list of intervals, ensuring that interfering intervals are not
// assigned the same stack slot.
- void ColorSpillSlots(ArenaVector<LiveInterval*>* nodes,
- size_t* num_stack_slots_used);
+ void ColorSpillSlots(ArrayRef<LiveInterval* const> nodes, /* out */ size_t* num_stack_slots_used);
// Provide stack slots to nodes that need them.
- void AllocateSpillSlots(const ArenaVector<InterferenceNode*>& nodes);
+ void AllocateSpillSlots(ArrayRef<InterferenceNode* const> nodes);
// Whether iterative move coalescing should be performed. Iterative move coalescing
// improves code quality, but increases compile time.
@@ -154,19 +154,19 @@
// Live intervals, split by kind (core and floating point).
// These should not contain high intervals, as those are represented by
// the corresponding low interval throughout register allocation.
- ArenaVector<LiveInterval*> core_intervals_;
- ArenaVector<LiveInterval*> fp_intervals_;
+ ScopedArenaVector<LiveInterval*> core_intervals_;
+ ScopedArenaVector<LiveInterval*> fp_intervals_;
// Intervals for temporaries, saved for special handling in the resolution phase.
- ArenaVector<LiveInterval*> temp_intervals_;
+ ScopedArenaVector<LiveInterval*> temp_intervals_;
// Safepoints, saved for special handling while processing instructions.
- ArenaVector<HInstruction*> safepoints_;
+ ScopedArenaVector<HInstruction*> safepoints_;
// Interference nodes representing specific registers. These are "pre-colored" nodes
// in the interference graph.
- ArenaVector<InterferenceNode*> physical_core_nodes_;
- ArenaVector<InterferenceNode*> physical_fp_nodes_;
+ ScopedArenaVector<InterferenceNode*> physical_core_nodes_;
+ ScopedArenaVector<InterferenceNode*> physical_fp_nodes_;
// Allocated stack slot counters.
size_t num_int_spill_slots_;
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index 9803a7b..cfe63bd 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -40,7 +40,7 @@
return GetHighForLowRegister(low->GetRegister()) != low->GetHighInterval()->GetRegister();
}
-RegisterAllocatorLinearScan::RegisterAllocatorLinearScan(ArenaAllocator* allocator,
+RegisterAllocatorLinearScan::RegisterAllocatorLinearScan(ScopedArenaAllocator* allocator,
CodeGenerator* codegen,
const SsaLivenessAnalysis& liveness)
: RegisterAllocator(allocator, codegen, liveness),
@@ -81,6 +81,8 @@
reserved_out_slots_ = ptr_size / kVRegSize + codegen->GetGraph()->GetMaximumNumberOfOutVRegs();
}
+RegisterAllocatorLinearScan::~RegisterAllocatorLinearScan() {}
+
static bool ShouldProcess(bool processing_core_registers, LiveInterval* interval) {
if (interval == nullptr) return false;
bool is_core_register = (interval->GetType() != DataType::Type::kFloat64)
@@ -90,7 +92,7 @@
void RegisterAllocatorLinearScan::AllocateRegisters() {
AllocateRegistersInternal();
- RegisterAllocationResolver(allocator_, codegen_, liveness_)
+ RegisterAllocationResolver(codegen_, liveness_)
.Resolve(ArrayRef<HInstruction* const>(safepoints_),
reserved_out_slots_,
int_spill_slots_.size(),
@@ -98,7 +100,7 @@
float_spill_slots_.size(),
double_spill_slots_.size(),
catch_phi_spill_slots_,
- temp_intervals_);
+ ArrayRef<LiveInterval* const>(temp_intervals_));
if (kIsDebugBuild) {
processing_core_registers_ = true;
@@ -298,7 +300,7 @@
LiveInterval* current = instruction->GetLiveInterval();
if (current == nullptr) return;
- ArenaVector<LiveInterval*>& unhandled = core_register
+ ScopedArenaVector<LiveInterval*>& unhandled = core_register
? unhandled_core_intervals_
: unhandled_fp_intervals_;
@@ -425,7 +427,9 @@
bool RegisterAllocatorLinearScan::ValidateInternal(bool log_fatal_on_failure) const {
// To simplify unit testing, we eagerly create the array of intervals, and
// call the helper method.
- ArenaVector<LiveInterval*> intervals(allocator_->Adapter(kArenaAllocRegisterAllocatorValidate));
+ ScopedArenaAllocator allocator(allocator_->GetArenaStack());
+ ScopedArenaVector<LiveInterval*> intervals(
+ allocator.Adapter(kArenaAllocRegisterAllocatorValidate));
for (size_t i = 0; i < liveness_.GetNumberOfSsaValues(); ++i) {
HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
if (ShouldProcess(processing_core_registers_, instruction->GetLiveInterval())) {
@@ -433,7 +437,7 @@
}
}
- const ArenaVector<LiveInterval*>* physical_register_intervals = processing_core_registers_
+ const ScopedArenaVector<LiveInterval*>* physical_register_intervals = processing_core_registers_
? &physical_core_register_intervals_
: &physical_fp_register_intervals_;
for (LiveInterval* fixed : *physical_register_intervals) {
@@ -448,8 +452,12 @@
}
}
- return ValidateIntervals(intervals, GetNumberOfSpillSlots(), reserved_out_slots_, *codegen_,
- allocator_, processing_core_registers_, log_fatal_on_failure);
+ return ValidateIntervals(ArrayRef<LiveInterval* const>(intervals),
+ GetNumberOfSpillSlots(),
+ reserved_out_slots_,
+ *codegen_,
+ processing_core_registers_,
+ log_fatal_on_failure);
}
void RegisterAllocatorLinearScan::DumpInterval(std::ostream& stream, LiveInterval* interval) const {
@@ -813,7 +821,7 @@
// Remove interval and its other half if any. Return iterator to the following element.
- static ArenaVector<LiveInterval*>::iterator RemoveIntervalAndPotentialOtherHalf(
- ArenaVector<LiveInterval*>* intervals, ArenaVector<LiveInterval*>::iterator pos) {
+ static ScopedArenaVector<LiveInterval*>::iterator RemoveIntervalAndPotentialOtherHalf(
+ ScopedArenaVector<LiveInterval*>* intervals, ScopedArenaVector<LiveInterval*>::iterator pos) {
DCHECK(intervals->begin() <= pos && pos < intervals->end());
LiveInterval* interval = *pos;
if (interval->IsLowInterval()) {
@@ -1044,7 +1052,8 @@
}
}
-void RegisterAllocatorLinearScan::AddSorted(ArenaVector<LiveInterval*>* array, LiveInterval* interval) {
+void RegisterAllocatorLinearScan::AddSorted(ScopedArenaVector<LiveInterval*>* array,
+ LiveInterval* interval) {
DCHECK(!interval->IsFixed() && !interval->HasSpillSlot());
size_t insert_at = 0;
for (size_t i = array->size(); i > 0; --i) {
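
AddSorted keeps the unhandled list ordered by decreasing start position, so the interval to process next (lowest start) is always at the back, where removal is cheap. A simplified standalone version of the insertion (the real code also keeps high intervals glued to their low counterparts):

    #include <cassert>
    #include <vector>

    struct Interval { size_t start; };

    // Scan from the back for the first element starting at or after `interval`,
    // and insert right after it, preserving descending order of start positions.
    static void AddSorted(std::vector<Interval*>* array, Interval* interval) {
      size_t insert_at = 0;
      for (size_t i = array->size(); i > 0; --i) {
        Interval* current = (*array)[i - 1u];
        if (current->start >= interval->start) {
          insert_at = i;
          break;
        }
      }
      array->insert(array->begin() + insert_at, interval);
    }

    int main() {
      Interval a{40}, b{10}, c{25};
      std::vector<Interval*> unhandled;
      AddSorted(&unhandled, &a);
      AddSorted(&unhandled, &b);
      AddSorted(&unhandled, &c);               // Order is now {40, 25, 10}.
      assert(unhandled.back()->start == 10u);  // Lowest start is processed first.
    }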
@@ -1102,7 +1111,7 @@
return;
}
- ArenaVector<size_t>* spill_slots = nullptr;
+ ScopedArenaVector<size_t>* spill_slots = nullptr;
switch (interval->GetType()) {
case DataType::Type::kFloat64:
spill_slots = &double_spill_slots_;
diff --git a/compiler/optimizing/register_allocator_linear_scan.h b/compiler/optimizing/register_allocator_linear_scan.h
index 9c650a4..36788b7 100644
--- a/compiler/optimizing/register_allocator_linear_scan.h
+++ b/compiler/optimizing/register_allocator_linear_scan.h
@@ -18,7 +18,7 @@
#define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_LINEAR_SCAN_H_
#include "arch/instruction_set.h"
-#include "base/arena_containers.h"
+#include "base/scoped_arena_containers.h"
#include "base/macros.h"
#include "register_allocator.h"
@@ -39,10 +39,10 @@
*/
class RegisterAllocatorLinearScan : public RegisterAllocator {
public:
- RegisterAllocatorLinearScan(ArenaAllocator* allocator,
+ RegisterAllocatorLinearScan(ScopedArenaAllocator* allocator,
CodeGenerator* codegen,
const SsaLivenessAnalysis& analysis);
- ~RegisterAllocatorLinearScan() OVERRIDE {}
+ ~RegisterAllocatorLinearScan() OVERRIDE;
void AllocateRegisters() OVERRIDE;
@@ -70,7 +70,7 @@
bool AllocateBlockedReg(LiveInterval* interval);
// Add `interval` in the given sorted list.
- static void AddSorted(ArenaVector<LiveInterval*>* array, LiveInterval* interval);
+ static void AddSorted(ScopedArenaVector<LiveInterval*>* array, LiveInterval* interval);
// Returns whether `reg` is blocked by the code generator.
bool IsBlocked(int reg) const;
@@ -107,43 +107,43 @@
// List of intervals for core registers that must be processed, ordered by start
// position. Last entry is the interval that has the lowest start position.
// This list is initially populated before doing the linear scan.
- ArenaVector<LiveInterval*> unhandled_core_intervals_;
+ ScopedArenaVector<LiveInterval*> unhandled_core_intervals_;
// List of intervals for floating-point registers. Same comments as above.
- ArenaVector<LiveInterval*> unhandled_fp_intervals_;
+ ScopedArenaVector<LiveInterval*> unhandled_fp_intervals_;
// Currently processed list of unhandled intervals. Either `unhandled_core_intervals_`
// or `unhandled_fp_intervals_`.
- ArenaVector<LiveInterval*>* unhandled_;
+ ScopedArenaVector<LiveInterval*>* unhandled_;
// List of intervals that have been processed.
- ArenaVector<LiveInterval*> handled_;
+ ScopedArenaVector<LiveInterval*> handled_;
// List of intervals that are currently active when processing a new live interval.
// That is, they have a live range that spans the start of the new interval.
- ArenaVector<LiveInterval*> active_;
+ ScopedArenaVector<LiveInterval*> active_;
// List of intervals that are currently inactive when processing a new live interval.
// That is, they have a lifetime hole that spans the start of the new interval.
- ArenaVector<LiveInterval*> inactive_;
+ ScopedArenaVector<LiveInterval*> inactive_;
// Fixed intervals for physical registers. Such intervals cover the positions
// where an instruction requires a specific register.
- ArenaVector<LiveInterval*> physical_core_register_intervals_;
- ArenaVector<LiveInterval*> physical_fp_register_intervals_;
+ ScopedArenaVector<LiveInterval*> physical_core_register_intervals_;
+ ScopedArenaVector<LiveInterval*> physical_fp_register_intervals_;
// Intervals for temporaries. Such intervals cover the positions
// where an instruction requires a temporary.
- ArenaVector<LiveInterval*> temp_intervals_;
+ ScopedArenaVector<LiveInterval*> temp_intervals_;
// The spill slots allocated for live intervals. We ensure spill slots
// are typed to avoid (1) doing moves and swaps between two different kinds
// of registers, and (2) swapping between a single stack slot and a double
// stack slot. This simplifies the parallel move resolver.
- ArenaVector<size_t> int_spill_slots_;
- ArenaVector<size_t> long_spill_slots_;
- ArenaVector<size_t> float_spill_slots_;
- ArenaVector<size_t> double_spill_slots_;
+ ScopedArenaVector<size_t> int_spill_slots_;
+ ScopedArenaVector<size_t> long_spill_slots_;
+ ScopedArenaVector<size_t> float_spill_slots_;
+ ScopedArenaVector<size_t> double_spill_slots_;
// Spill slots allocated to catch phis. This category is special-cased because
// (1) slots are allocated prior to linear scan and in reverse linear order,
@@ -151,7 +151,7 @@
size_t catch_phi_spill_slots_;
// Instructions that need a safepoint.
- ArenaVector<HInstruction*> safepoints_;
+ ScopedArenaVector<HInstruction*> safepoints_;
// True if processing core registers. False if processing floating
// point registers.
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 59987e2..69ed8c7 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -38,12 +38,36 @@
// Note: the register allocator tests rely on the fact that constants have live
// intervals and registers get allocated to them.
-class RegisterAllocatorTest : public CommonCompilerTest {
+class RegisterAllocatorTest : public OptimizingUnitTest {
protected:
// These functions need to access private variables of LocationSummary, so we declare it
// as a member of RegisterAllocatorTest, which we make a friend class.
- static void SameAsFirstInputHint(Strategy strategy);
- static void ExpectedInRegisterHint(Strategy strategy);
+ void SameAsFirstInputHint(Strategy strategy);
+ void ExpectedInRegisterHint(Strategy strategy);
+
+ // Helper functions that make use of the OptimizingUnitTest's members.
+ bool Check(const uint16_t* data, Strategy strategy);
+ void CFG1(Strategy strategy);
+ void Loop1(Strategy strategy);
+ void Loop2(Strategy strategy);
+ void Loop3(Strategy strategy);
+ void DeadPhi(Strategy strategy);
+ HGraph* BuildIfElseWithPhi(HPhi** phi, HInstruction** input1, HInstruction** input2);
+ void PhiHint(Strategy strategy);
+ HGraph* BuildFieldReturn(HInstruction** field, HInstruction** ret);
+ HGraph* BuildTwoSubs(HInstruction** first_sub, HInstruction** second_sub);
+ HGraph* BuildDiv(HInstruction** div);
+ void ExpectedExactInRegisterAndSameOutputHint(Strategy strategy);
+
+ bool ValidateIntervals(const ScopedArenaVector<LiveInterval*>& intervals,
+ const CodeGenerator& codegen) {
+ return RegisterAllocator::ValidateIntervals(ArrayRef<LiveInterval* const>(intervals),
+ /* number_of_spill_slots */ 0u,
+ /* number_of_out_slots */ 0u,
+ codegen,
+ /* processing_core_registers */ true,
+ /* log_fatal_on_failure */ false);
+ }
};
// This macro should include all register allocation strategies that should be tested.
@@ -55,17 +79,15 @@
test_name(Strategy::kRegisterAllocatorGraphColor);\
}
-static bool Check(const uint16_t* data, Strategy strategy) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+bool RegisterAllocatorTest::Check(const uint16_t* data, Strategy strategy) {
+ HGraph* graph = CreateCFG(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
return register_allocator->Validate(false);
}
@@ -75,95 +97,82 @@
* tests are based on this validation method.
*/
TEST_F(RegisterAllocatorTest, ValidateIntervals) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
+ HGraph* graph = CreateGraph();
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- ArenaVector<LiveInterval*> intervals(allocator.Adapter());
+ ScopedArenaVector<LiveInterval*> intervals(GetScopedAllocator()->Adapter());
// Test with two intervals of the same range.
{
static constexpr size_t ranges[][2] = {{0, 42}};
- intervals.push_back(BuildInterval(ranges, arraysize(ranges), &allocator, 0));
- intervals.push_back(BuildInterval(ranges, arraysize(ranges), &allocator, 1));
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ intervals.push_back(BuildInterval(ranges, arraysize(ranges), GetScopedAllocator(), 0));
+ intervals.push_back(BuildInterval(ranges, arraysize(ranges), GetScopedAllocator(), 1));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
intervals[1]->SetRegister(0);
- ASSERT_FALSE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ ASSERT_FALSE(ValidateIntervals(intervals, codegen));
intervals.clear();
}
// Test with two non-intersecting intervals.
{
static constexpr size_t ranges1[][2] = {{0, 42}};
- intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
+ intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), GetScopedAllocator(), 0));
static constexpr size_t ranges2[][2] = {{42, 43}};
- intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), GetScopedAllocator(), 1));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
intervals[1]->SetRegister(0);
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
intervals.clear();
}
// Test with two non-intersecting intervals, with one with a lifetime hole.
{
static constexpr size_t ranges1[][2] = {{0, 42}, {45, 48}};
- intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
+ intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), GetScopedAllocator(), 0));
static constexpr size_t ranges2[][2] = {{42, 43}};
- intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), GetScopedAllocator(), 1));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
intervals[1]->SetRegister(0);
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
intervals.clear();
}
// Test with intersecting intervals.
{
static constexpr size_t ranges1[][2] = {{0, 42}, {44, 48}};
- intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
+ intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), GetScopedAllocator(), 0));
static constexpr size_t ranges2[][2] = {{42, 47}};
- intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), GetScopedAllocator(), 1));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
intervals[1]->SetRegister(0);
- ASSERT_FALSE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ ASSERT_FALSE(ValidateIntervals(intervals, codegen));
intervals.clear();
}
// Test with siblings.
{
static constexpr size_t ranges1[][2] = {{0, 42}, {44, 48}};
- intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
+ intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), GetScopedAllocator(), 0));
intervals[0]->SplitAt(43);
static constexpr size_t ranges2[][2] = {{42, 47}};
- intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), GetScopedAllocator(), 1));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
intervals[1]->SetRegister(0);
// Sibling of the first interval has no register allocated to it.
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
intervals[0]->GetNextSibling()->SetRegister(0);
- ASSERT_FALSE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ ASSERT_FALSE(ValidateIntervals(intervals, codegen));
}
}
-static void CFG1(Strategy strategy) {
+void RegisterAllocatorTest::CFG1(Strategy strategy) {
/*
* Test the following snippet:
* return 0;
@@ -185,7 +194,7 @@
TEST_ALL_STRATEGIES(CFG1);
-static void Loop1(Strategy strategy) {
+void RegisterAllocatorTest::Loop1(Strategy strategy) {
/*
* Test the following snippet:
* int a = 0;
@@ -226,7 +235,7 @@
TEST_ALL_STRATEGIES(Loop1);
-static void Loop2(Strategy strategy) {
+void RegisterAllocatorTest::Loop2(Strategy strategy) {
/*
* Test the following snippet:
* int a = 0;
@@ -277,7 +286,7 @@
TEST_ALL_STRATEGIES(Loop2);
-static void Loop3(Strategy strategy) {
+void RegisterAllocatorTest::Loop3(Strategy strategy) {
/*
* Test the following snippet:
* int a = 0
@@ -314,16 +323,14 @@
Instruction::MOVE | 1 << 12 | 0 << 8,
Instruction::GOTO | 0xF900);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_TRUE(register_allocator->Validate(false));
@@ -351,13 +358,11 @@
Instruction::XOR_INT_LIT8 | 1 << 8, 1 << 8 | 1,
Instruction::RETURN_VOID);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
HXor* first_xor = graph->GetBlocks()[1]->GetFirstInstruction()->AsXor();
@@ -383,7 +388,7 @@
ASSERT_EQ(new_interval->FirstRegisterUse(), last_xor->GetLifetimePosition());
}
-static void DeadPhi(Strategy strategy) {
+void RegisterAllocatorTest::DeadPhi(Strategy strategy) {
/* Test for a dead loop phi taking as back-edge input a phi that also has
* this loop phi as input. Walking backwards in SsaDeadPhiElimination
* does not solve the problem because the loop phi will be visited last.
@@ -405,17 +410,15 @@
Instruction::GOTO | 0xFD00,
Instruction::RETURN_VOID);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
SsaDeadPhiElimination(graph).Run();
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_TRUE(register_allocator->Validate(false));
}
@@ -433,16 +436,14 @@
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
SsaDeadPhiElimination(graph).Run();
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
- RegisterAllocatorLinearScan register_allocator(&allocator, &codegen, liveness);
+ RegisterAllocatorLinearScan register_allocator(GetScopedAllocator(), &codegen, liveness);
// Add an artificial range to cover the temps that will be put in the unhandled list.
LiveInterval* unhandled = graph->GetEntryBlock()->GetFirstInstruction()->GetLiveInterval();
@@ -461,20 +462,21 @@
// Add three temps holding the same register, and starting at different positions.
// Put the one that should be picked in the middle of the inactive list to ensure
// we do not depend on an order.
- LiveInterval* interval = LiveInterval::MakeFixedInterval(&allocator, 0, DataType::Type::kInt32);
+ LiveInterval* interval =
+ LiveInterval::MakeFixedInterval(GetScopedAllocator(), 0, DataType::Type::kInt32);
interval->AddRange(40, 50);
register_allocator.inactive_.push_back(interval);
- interval = LiveInterval::MakeFixedInterval(&allocator, 0, DataType::Type::kInt32);
+ interval = LiveInterval::MakeFixedInterval(GetScopedAllocator(), 0, DataType::Type::kInt32);
interval->AddRange(20, 30);
register_allocator.inactive_.push_back(interval);
- interval = LiveInterval::MakeFixedInterval(&allocator, 0, DataType::Type::kInt32);
+ interval = LiveInterval::MakeFixedInterval(GetScopedAllocator(), 0, DataType::Type::kInt32);
interval->AddRange(60, 70);
register_allocator.inactive_.push_back(interval);
register_allocator.number_of_registers_ = 1;
- register_allocator.registers_array_ = allocator.AllocArray<size_t>(1);
+ register_allocator.registers_array_ = GetAllocator()->AllocArray<size_t>(1);
register_allocator.processing_core_registers_ = true;
register_allocator.unhandled_ = &register_allocator.unhandled_core_intervals_;
@@ -487,36 +489,35 @@
ASSERT_EQ(20u, register_allocator.unhandled_->front()->GetStart());
}
-static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator,
- HPhi** phi,
- HInstruction** input1,
- HInstruction** input2) {
- HGraph* graph = CreateGraph(allocator);
- HBasicBlock* entry = new (allocator) HBasicBlock(graph);
+HGraph* RegisterAllocatorTest::BuildIfElseWithPhi(HPhi** phi,
+ HInstruction** input1,
+ HInstruction** input2) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
entry->AddInstruction(parameter);
- HBasicBlock* block = new (allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
entry->AddSuccessor(block);
- HInstruction* test = new (allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(22),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0);
+ HInstruction* test = new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(22),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0);
block->AddInstruction(test);
- block->AddInstruction(new (allocator) HIf(test));
- HBasicBlock* then = new (allocator) HBasicBlock(graph);
- HBasicBlock* else_ = new (allocator) HBasicBlock(graph);
- HBasicBlock* join = new (allocator) HBasicBlock(graph);
+ block->AddInstruction(new (GetAllocator()) HIf(test));
+ HBasicBlock* then = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* else_ = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* join = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(then);
graph->AddBlock(else_);
graph->AddBlock(join);
@@ -525,32 +526,32 @@
block->AddSuccessor(else_);
then->AddSuccessor(join);
else_->AddSuccessor(join);
- then->AddInstruction(new (allocator) HGoto());
- else_->AddInstruction(new (allocator) HGoto());
+ then->AddInstruction(new (GetAllocator()) HGoto());
+ else_->AddInstruction(new (GetAllocator()) HGoto());
- *phi = new (allocator) HPhi(allocator, 0, 0, DataType::Type::kInt32);
+ *phi = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
join->AddPhi(*phi);
- *input1 = new (allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0);
- *input2 = new (allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0);
+ *input1 = new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kInt32,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0);
+ *input2 = new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kInt32,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0);
then->AddInstruction(*input1);
else_->AddInstruction(*input2);
- join->AddInstruction(new (allocator) HExit());
+ join->AddInstruction(new (GetAllocator()) HExit());
(*phi)->AddInput(*input1);
(*phi)->AddInput(*input2);
@@ -559,23 +560,21 @@
return graph;
}
-static void PhiHint(Strategy strategy) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
+void RegisterAllocatorTest::PhiHint(Strategy strategy) {
HPhi *phi;
HInstruction *input1, *input2;
{
- HGraph* graph = BuildIfElseWithPhi(&allocator, &phi, &input1, &input2);
+ HGraph* graph = BuildIfElseWithPhi(&phi, &input1, &input2);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Check that the register allocator is deterministic.
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 0);
@@ -584,18 +583,18 @@
}
{
- HGraph* graph = BuildIfElseWithPhi(&allocator, &phi, &input1, &input2);
+ HGraph* graph = BuildIfElseWithPhi(&phi, &input1, &input2);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Set the phi to a specific register, and check that the inputs get allocated
// the same register.
phi->GetLocations()->UpdateOut(Location::RegisterLocation(2));
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 2);
@@ -604,18 +603,18 @@
}
{
- HGraph* graph = BuildIfElseWithPhi(&allocator, &phi, &input1, &input2);
+ HGraph* graph = BuildIfElseWithPhi(&phi, &input1, &input2);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Set input1 to a specific register, and check that the phi and other input get allocated
// the same register.
input1->GetLocations()->UpdateOut(Location::RegisterLocation(2));
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 2);
@@ -624,18 +623,18 @@
}
{
- HGraph* graph = BuildIfElseWithPhi(&allocator, &phi, &input1, &input2);
+ HGraph* graph = BuildIfElseWithPhi(&phi, &input1, &input2);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Set input2 to a specific register, and check that the phi and other input get allocated
// the same register.
input2->GetLocations()->UpdateOut(Location::RegisterLocation(2));
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 2);
@@ -650,58 +649,54 @@
PhiHint(Strategy::kRegisterAllocatorLinearScan);
}
-static HGraph* BuildFieldReturn(ArenaAllocator* allocator,
- HInstruction** field,
- HInstruction** ret) {
- HGraph* graph = CreateGraph(allocator);
- HBasicBlock* entry = new (allocator) HBasicBlock(graph);
+HGraph* RegisterAllocatorTest::BuildFieldReturn(HInstruction** field, HInstruction** ret) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
entry->AddInstruction(parameter);
- HBasicBlock* block = new (allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
entry->AddSuccessor(block);
- *field = new (allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0);
+ *field = new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kInt32,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0);
block->AddInstruction(*field);
- *ret = new (allocator) HReturn(*field);
+ *ret = new (GetAllocator()) HReturn(*field);
block->AddInstruction(*ret);
- HBasicBlock* exit = new (allocator) HBasicBlock(graph);
+ HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(exit);
block->AddSuccessor(exit);
- exit->AddInstruction(new (allocator) HExit());
+ exit->AddInstruction(new (GetAllocator()) HExit());
graph->BuildDominatorTree();
return graph;
}
void RegisterAllocatorTest::ExpectedInRegisterHint(Strategy strategy) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
HInstruction *field, *ret;
{
- HGraph* graph = BuildFieldReturn(&allocator, &field, &ret);
+ HGraph* graph = BuildFieldReturn(&field, &ret);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
// Sanity check that in normal conditions, the register should be hinted to 0 (EAX).
@@ -709,19 +704,19 @@
}
{
- HGraph* graph = BuildFieldReturn(&allocator, &field, &ret);
+ HGraph* graph = BuildFieldReturn(&field, &ret);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Check that the field gets put in the register expected by its use.
// Don't use SetInAt because we are overriding an already allocated location.
ret->GetLocations()->inputs_[0] = Location::RegisterLocation(2);
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(field->GetLiveInterval()->GetRegister(), 2);
@@ -734,50 +729,46 @@
ExpectedInRegisterHint(Strategy::kRegisterAllocatorLinearScan);
}
-static HGraph* BuildTwoSubs(ArenaAllocator* allocator,
- HInstruction** first_sub,
- HInstruction** second_sub) {
- HGraph* graph = CreateGraph(allocator);
- HBasicBlock* entry = new (allocator) HBasicBlock(graph);
+HGraph* RegisterAllocatorTest::BuildTwoSubs(HInstruction** first_sub, HInstruction** second_sub) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
entry->AddInstruction(parameter);
HInstruction* constant1 = graph->GetIntConstant(1);
HInstruction* constant2 = graph->GetIntConstant(2);
- HBasicBlock* block = new (allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
entry->AddSuccessor(block);
- *first_sub = new (allocator) HSub(DataType::Type::kInt32, parameter, constant1);
+ *first_sub = new (GetAllocator()) HSub(DataType::Type::kInt32, parameter, constant1);
block->AddInstruction(*first_sub);
- *second_sub = new (allocator) HSub(DataType::Type::kInt32, *first_sub, constant2);
+ *second_sub = new (GetAllocator()) HSub(DataType::Type::kInt32, *first_sub, constant2);
block->AddInstruction(*second_sub);
- block->AddInstruction(new (allocator) HExit());
+ block->AddInstruction(new (GetAllocator()) HExit());
graph->BuildDominatorTree();
return graph;
}
void RegisterAllocatorTest::SameAsFirstInputHint(Strategy strategy) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
HInstruction *first_sub, *second_sub;
{
- HGraph* graph = BuildTwoSubs(&allocator, &first_sub, &second_sub);
+ HGraph* graph = BuildTwoSubs(&first_sub, &second_sub);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
// Sanity check that in normal conditions, the registers are the same.
@@ -786,11 +777,11 @@
}
{
- HGraph* graph = BuildTwoSubs(&allocator, &first_sub, &second_sub);
+ HGraph* graph = BuildTwoSubs(&first_sub, &second_sub);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// check that both adds get the same register.
@@ -799,8 +790,8 @@
ASSERT_EQ(first_sub->GetLocations()->Out().GetPolicy(), Location::kSameAsFirstInput);
ASSERT_EQ(second_sub->GetLocations()->Out().GetPolicy(), Location::kSameAsFirstInput);
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(first_sub->GetLiveInterval()->GetRegister(), 2);
@@ -814,53 +805,47 @@
SameAsFirstInputHint(Strategy::kRegisterAllocatorLinearScan);
}
-static HGraph* BuildDiv(ArenaAllocator* allocator,
- HInstruction** div) {
- HGraph* graph = CreateGraph(allocator);
- HBasicBlock* entry = new (allocator) HBasicBlock(graph);
+HGraph* RegisterAllocatorTest::BuildDiv(HInstruction** div) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* first = new (allocator) HParameterValue(
+ HInstruction* first = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
- HInstruction* second = new (allocator) HParameterValue(
+ HInstruction* second = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
entry->AddInstruction(first);
entry->AddInstruction(second);
- HBasicBlock* block = new (allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
entry->AddSuccessor(block);
- *div =
- new (allocator) HDiv(DataType::Type::kInt32, first, second, 0); // don't care about dex_pc.
+ *div = new (GetAllocator()) HDiv(
+ DataType::Type::kInt32, first, second, 0); // don't care about dex_pc.
block->AddInstruction(*div);
- block->AddInstruction(new (allocator) HExit());
+ block->AddInstruction(new (GetAllocator()) HExit());
graph->BuildDominatorTree();
return graph;
}
-static void ExpectedExactInRegisterAndSameOutputHint(Strategy strategy) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
+void RegisterAllocatorTest::ExpectedExactInRegisterAndSameOutputHint(Strategy strategy) {
HInstruction *div;
+ HGraph* graph = BuildDiv(&div);
+ std::unique_ptr<const X86InstructionSetFeatures> features_x86(
+ X86InstructionSetFeatures::FromCppDefines());
+ x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
+ liveness.Analyze();
- {
- HGraph* graph = BuildDiv(&allocator, &div);
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
- liveness.Analyze();
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
+ register_allocator->AllocateRegisters();
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
- register_allocator->AllocateRegisters();
-
- // div on x86 requires its first input in eax and the output be the same as the first input.
- ASSERT_EQ(div->GetLiveInterval()->GetRegister(), 0);
- }
+ // div on x86 requires its first input in eax and the output to be the same as the first input.
+ ASSERT_EQ(div->GetLiveInterval()->GetRegister(), 0);
}
// TODO: Enable this test for graph coloring register allocation when iterative move
@@ -874,59 +859,57 @@
// position.
// This test only applies to the linear scan allocator.
TEST_F(RegisterAllocatorTest, SpillInactive) {
- ArenaPool pool;
-
// Create a synthesized graph to please the register_allocator and
// ssa_liveness_analysis code.
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* one = new (&allocator) HParameterValue(
+ HInstruction* one = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
- HInstruction* two = new (&allocator) HParameterValue(
+ HInstruction* two = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
- HInstruction* three = new (&allocator) HParameterValue(
+ HInstruction* three = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
- HInstruction* four = new (&allocator) HParameterValue(
+ HInstruction* four = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
entry->AddInstruction(one);
entry->AddInstruction(two);
entry->AddInstruction(three);
entry->AddInstruction(four);
- HBasicBlock* block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
entry->AddSuccessor(block);
- block->AddInstruction(new (&allocator) HExit());
+ block->AddInstruction(new (GetAllocator()) HExit());
// We create a synthesized user requesting a register, to avoid just spilling the
// intervals.
- HPhi* user = new (&allocator) HPhi(&allocator, 0, 1, DataType::Type::kInt32);
+ HPhi* user = new (GetAllocator()) HPhi(GetAllocator(), 0, 1, DataType::Type::kInt32);
user->AddInput(one);
user->SetBlock(block);
- LocationSummary* locations = new (&allocator) LocationSummary(user, LocationSummary::kNoCall);
+ LocationSummary* locations = new (GetAllocator()) LocationSummary(user, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
static constexpr size_t phi_ranges[][2] = {{20, 30}};
- BuildInterval(phi_ranges, arraysize(phi_ranges), &allocator, -1, user);
+ BuildInterval(phi_ranges, arraysize(phi_ranges), GetScopedAllocator(), -1, user);
// Create an interval with lifetime holes.
static constexpr size_t ranges1[][2] = {{0, 2}, {4, 6}, {8, 10}};
- LiveInterval* first = BuildInterval(ranges1, arraysize(ranges1), &allocator, -1, one);
- first->uses_.push_front(*new(&allocator) UsePosition(user, false, 8));
- first->uses_.push_front(*new(&allocator) UsePosition(user, false, 7));
- first->uses_.push_front(*new(&allocator) UsePosition(user, false, 6));
+ LiveInterval* first = BuildInterval(ranges1, arraysize(ranges1), GetScopedAllocator(), -1, one);
+ first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 8));
+ first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 7));
+ first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 6));
- locations = new (&allocator) LocationSummary(first->GetDefinedBy(), LocationSummary::kNoCall);
+ locations = new (GetAllocator()) LocationSummary(first->GetDefinedBy(), LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
first = first->SplitAt(1);
// Create an interval that conflicts with the next interval, to force the next
// interval to call `AllocateBlockedReg`.
static constexpr size_t ranges2[][2] = {{2, 4}};
- LiveInterval* second = BuildInterval(ranges2, arraysize(ranges2), &allocator, -1, two);
- locations = new (&allocator) LocationSummary(second->GetDefinedBy(), LocationSummary::kNoCall);
+ LiveInterval* second = BuildInterval(ranges2, arraysize(ranges2), GetScopedAllocator(), -1, two);
+ locations =
+ new (GetAllocator()) LocationSummary(second->GetDefinedBy(), LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
 // Create an interval that will lead to splitting the first interval. The bug occurred
@@ -935,31 +918,32 @@
// "[0, 2(, [4, 6(" in the list of handled intervals, even though we haven't processed intervals
// before lifetime position 6 yet.
static constexpr size_t ranges3[][2] = {{2, 4}, {8, 10}};
- LiveInterval* third = BuildInterval(ranges3, arraysize(ranges3), &allocator, -1, three);
- third->uses_.push_front(*new(&allocator) UsePosition(user, false, 8));
- third->uses_.push_front(*new(&allocator) UsePosition(user, false, 4));
- third->uses_.push_front(*new(&allocator) UsePosition(user, false, 3));
- locations = new (&allocator) LocationSummary(third->GetDefinedBy(), LocationSummary::kNoCall);
+ LiveInterval* third = BuildInterval(ranges3, arraysize(ranges3), GetScopedAllocator(), -1, three);
+ third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 8));
+ third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 4));
+ third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 3));
+ locations = new (GetAllocator()) LocationSummary(third->GetDefinedBy(), LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
third = third->SplitAt(3);
// Because the first part of the split interval was considered handled, this interval
// was free to allocate the same register, even though it conflicts with it.
static constexpr size_t ranges4[][2] = {{4, 6}};
- LiveInterval* fourth = BuildInterval(ranges4, arraysize(ranges4), &allocator, -1, four);
- locations = new (&allocator) LocationSummary(fourth->GetDefinedBy(), LocationSummary::kNoCall);
+ LiveInterval* fourth = BuildInterval(ranges4, arraysize(ranges4), GetScopedAllocator(), -1, four);
+ locations =
+ new (GetAllocator()) LocationSummary(fourth->GetDefinedBy(), LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
// Populate the instructions in the liveness object, to please the register allocator.
for (size_t i = 0; i < 32; ++i) {
liveness.instructions_from_lifetime_position_.push_back(user);
}
- RegisterAllocatorLinearScan register_allocator(&allocator, &codegen, liveness);
+ RegisterAllocatorLinearScan register_allocator(GetScopedAllocator(), &codegen, liveness);
register_allocator.unhandled_core_intervals_.push_back(fourth);
register_allocator.unhandled_core_intervals_.push_back(third);
register_allocator.unhandled_core_intervals_.push_back(second);
@@ -967,19 +951,18 @@
// Set just one register available to make all intervals compete for the same.
register_allocator.number_of_registers_ = 1;
- register_allocator.registers_array_ = allocator.AllocArray<size_t>(1);
+ register_allocator.registers_array_ = GetAllocator()->AllocArray<size_t>(1);
register_allocator.processing_core_registers_ = true;
 register_allocator.unhandled_ = &register_allocator.unhandled_core_intervals_;
register_allocator.LinearScan();
 // Test that there are no conflicts between intervals.
- ArenaVector<LiveInterval*> intervals(allocator.Adapter());
+ ScopedArenaVector<LiveInterval*> intervals(GetScopedAllocator()->Adapter());
intervals.push_back(first);
intervals.push_back(second);
intervals.push_back(third);
intervals.push_back(fourth);
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
}
} // namespace art
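Note on the register_allocator_test.cc changes above: every test used to build its own ArenaPool/ArenaAllocator pair and now funnels allocation through the OptimizingUnitTest fixture's GetAllocator()/GetScopedAllocator(). A minimal sketch of what such a fixture plausibly holds follows; only the two accessor names are taken from this change, the rest is hypothetical (the real fixture lives in optimizing_unit_test.h):

    // Sketch, not the actual fixture: one pool backs both a graph-lifetime
    // ArenaAllocator and a stack of scoped (pass-local) allocators.
    class OptimizingUnitTestSketch {
     public:
      OptimizingUnitTestSketch()
          : allocator_(&pool_), arena_stack_(&pool_), scoped_allocator_(&arena_stack_) {}
      ArenaAllocator* GetAllocator() { return &allocator_; }  // HGraph blocks, instructions
      ScopedArenaAllocator* GetScopedAllocator() { return &scoped_allocator_; }  // liveness, regalloc
     private:
      ArenaPool pool_;                         // backing memory for everything below
      ArenaAllocator allocator_;               // lives as long as the graph
      ArenaStack arena_stack_;                 // stack discipline for scoped allocations
      ScopedArenaAllocator scoped_allocator_;  // released in bulk when the scope unwinds
    };

Pass-local data (SsaLivenessAnalysis, RegisterAllocator) goes through the scoped allocator so it is freed wholesale after allocation finishes, while the graph itself stays on the longer-lived allocator.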
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index 5212e86..c673d54 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -18,6 +18,8 @@
#include "scheduler.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "data_type-inl.h"
#include "prepare_for_register_allocation.h"
@@ -442,7 +444,7 @@
}
void SchedulingGraph::DumpAsDotGraph(const std::string& description,
- const ArenaVector<SchedulingNode*>& initial_candidates) {
+ const ScopedArenaVector<SchedulingNode*>& initial_candidates) {
 // TODO(xueliang): ideally we should move scheduling information into HInstruction; after that
 // we should move this dot graph dump feature to the visualizer and add a compiler option for it.
std::ofstream output("scheduling_graphs.dot", std::ofstream::out | std::ofstream::app);
@@ -451,7 +453,7 @@
// Start the dot graph. Use an increasing index for easier differentiation.
output << "digraph G {\n";
for (const auto& entry : nodes_map_) {
- SchedulingNode* node = entry.second;
+ SchedulingNode* node = entry.second.get();
DumpAsDotNode(output, node);
}
// Create a fake 'end_of_scheduling' node to help visualization of critical_paths.
@@ -466,7 +468,7 @@
}
SchedulingNode* CriticalPathSchedulingNodeSelector::SelectMaterializedCondition(
- ArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) const {
+ ScopedArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) const {
// Schedule condition inputs that can be materialized immediately before their use.
 // In the following example, after we've scheduled HSelect, we want LessThan to be scheduled
// immediately, because it is a materialized condition, and will be emitted right before HSelect
@@ -506,7 +508,7 @@
}
SchedulingNode* CriticalPathSchedulingNodeSelector::PopHighestPriorityNode(
- ArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) {
+ ScopedArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) {
DCHECK(!nodes->empty());
SchedulingNode* select_node = nullptr;
@@ -562,7 +564,7 @@
}
void HScheduler::Schedule(HBasicBlock* block) {
- ArenaVector<SchedulingNode*> scheduling_nodes(arena_->Adapter(kArenaAllocScheduler));
+ ScopedArenaVector<SchedulingNode*> scheduling_nodes(allocator_->Adapter(kArenaAllocScheduler));
// Build the scheduling graph.
scheduling_graph_.Clear();
@@ -593,7 +595,7 @@
}
}
- ArenaVector<SchedulingNode*> initial_candidates(arena_->Adapter(kArenaAllocScheduler));
+ ScopedArenaVector<SchedulingNode*> initial_candidates(allocator_->Adapter(kArenaAllocScheduler));
if (kDumpDotSchedulingGraphs) {
// Remember the list of initial candidates for debug output purposes.
initial_candidates.assign(candidates_.begin(), candidates_.end());
@@ -779,7 +781,7 @@
#if defined(ART_ENABLE_CODEGEN_arm64) || defined(ART_ENABLE_CODEGEN_arm)
// Phase-local allocator that allocates scheduler internal data structures like
 // scheduling nodes, internal nodes map, dependencies, etc.
- ArenaAllocator arena_allocator(graph_->GetArena()->GetArenaPool());
+ ScopedArenaAllocator arena_allocator(graph_->GetArenaStack());
CriticalPathSchedulingNodeSelector critical_path_selector;
RandomSchedulingNodeSelector random_selector;
SchedulingNodeSelector* selector = schedule_randomly
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index 66ffac5..3efd26a 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -19,6 +19,8 @@
#include <fstream>
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "base/time_utils.h"
#include "code_generator.h"
#include "driver/compiler_driver.h"
@@ -152,16 +154,16 @@
/**
* A node representing an `HInstruction` in the `SchedulingGraph`.
*/
-class SchedulingNode : public ArenaObject<kArenaAllocScheduler> {
+class SchedulingNode : public DeletableArenaObject<kArenaAllocScheduler> {
public:
- SchedulingNode(HInstruction* instr, ArenaAllocator* arena, bool is_scheduling_barrier)
+ SchedulingNode(HInstruction* instr, ScopedArenaAllocator* allocator, bool is_scheduling_barrier)
: latency_(0),
internal_latency_(0),
critical_path_(0),
instruction_(instr),
is_scheduling_barrier_(is_scheduling_barrier),
- data_predecessors_(arena->Adapter(kArenaAllocScheduler)),
- other_predecessors_(arena->Adapter(kArenaAllocScheduler)),
+ data_predecessors_(allocator->Adapter(kArenaAllocScheduler)),
+ other_predecessors_(allocator->Adapter(kArenaAllocScheduler)),
num_unscheduled_successors_(0) {
data_predecessors_.reserve(kPreallocatedPredecessors);
}
@@ -171,11 +173,19 @@
predecessor->num_unscheduled_successors_++;
}
+ const ScopedArenaVector<SchedulingNode*>& GetDataPredecessors() const {
+ return data_predecessors_;
+ }
+
void AddOtherPredecessor(SchedulingNode* predecessor) {
other_predecessors_.push_back(predecessor);
predecessor->num_unscheduled_successors_++;
}
+ const ScopedArenaVector<SchedulingNode*>& GetOtherPredecessors() const {
+ return other_predecessors_;
+ }
+
void DecrementNumberOfUnscheduledSuccessors() {
num_unscheduled_successors_--;
}
@@ -195,8 +205,6 @@
void SetInternalLatency(uint32_t internal_latency) { internal_latency_ = internal_latency; }
uint32_t GetCriticalPath() const { return critical_path_; }
bool IsSchedulingBarrier() const { return is_scheduling_barrier_; }
- const ArenaVector<SchedulingNode*>& GetDataPredecessors() const { return data_predecessors_; }
- const ArenaVector<SchedulingNode*>& GetOtherPredecessors() const { return other_predecessors_; }
private:
// The latency of this node. It represents the latency between the moment the
@@ -227,8 +235,8 @@
// Predecessors in `data_predecessors_` are data dependencies. Those in
// `other_predecessors_` contain side-effect dependencies, environment
// dependencies, and scheduling barrier dependencies.
- ArenaVector<SchedulingNode*> data_predecessors_;
- ArenaVector<SchedulingNode*> other_predecessors_;
+ ScopedArenaVector<SchedulingNode*> data_predecessors_;
+ ScopedArenaVector<SchedulingNode*> other_predecessors_;
// The number of unscheduled successors for this node. This number is
// decremented as successors are scheduled. When it reaches zero this node
@@ -243,19 +251,21 @@
*/
class SchedulingGraph : public ValueObject {
public:
- SchedulingGraph(const HScheduler* scheduler, ArenaAllocator* arena)
+ SchedulingGraph(const HScheduler* scheduler, ScopedArenaAllocator* allocator)
: scheduler_(scheduler),
- arena_(arena),
+ arena_(allocator),
contains_scheduling_barrier_(false),
nodes_map_(arena_->Adapter(kArenaAllocScheduler)),
heap_location_collector_(nullptr) {}
SchedulingNode* AddNode(HInstruction* instr, bool is_scheduling_barrier = false) {
- SchedulingNode* node = new (arena_) SchedulingNode(instr, arena_, is_scheduling_barrier);
- nodes_map_.Insert(std::make_pair(instr, node));
+ std::unique_ptr<SchedulingNode> node(
+ new (arena_) SchedulingNode(instr, arena_, is_scheduling_barrier));
+ SchedulingNode* result = node.get();
+ nodes_map_.Insert(std::make_pair(instr, std::move(node)));
contains_scheduling_barrier_ |= is_scheduling_barrier;
AddDependencies(instr, is_scheduling_barrier);
- return node;
+ return result;
}
void Clear() {
@@ -272,7 +282,7 @@
if (it == nodes_map_.end()) {
return nullptr;
} else {
- return it->second;
+ return it->second.get();
}
}
@@ -290,7 +300,7 @@
// Dump the scheduling graph, in dot file format, appending it to the file
// `scheduling_graphs.dot`.
void DumpAsDotGraph(const std::string& description,
- const ArenaVector<SchedulingNode*>& initial_candidates);
+ const ScopedArenaVector<SchedulingNode*>& initial_candidates);
protected:
void AddDependency(SchedulingNode* node, SchedulingNode* dependency, bool is_data_dependency);
@@ -313,11 +323,11 @@
const HScheduler* const scheduler_;
- ArenaAllocator* const arena_;
+ ScopedArenaAllocator* const arena_;
bool contains_scheduling_barrier_;
- ArenaHashMap<const HInstruction*, SchedulingNode*> nodes_map_;
+ ScopedArenaHashMap<const HInstruction*, std::unique_ptr<SchedulingNode>> nodes_map_;
const HeapLocationCollector* heap_location_collector_;
};
@@ -367,11 +377,11 @@
class SchedulingNodeSelector : public ArenaObject<kArenaAllocScheduler> {
public:
- virtual SchedulingNode* PopHighestPriorityNode(ArenaVector<SchedulingNode*>* nodes,
+ virtual SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes,
const SchedulingGraph& graph) = 0;
virtual ~SchedulingNodeSelector() {}
protected:
- static void DeleteNodeAtIndex(ArenaVector<SchedulingNode*>* nodes, size_t index) {
+ static void DeleteNodeAtIndex(ScopedArenaVector<SchedulingNode*>* nodes, size_t index) {
(*nodes)[index] = nodes->back();
nodes->pop_back();
}
@@ -387,7 +397,7 @@
srand(seed_);
}
- SchedulingNode* PopHighestPriorityNode(ArenaVector<SchedulingNode*>* nodes,
+ SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes,
const SchedulingGraph& graph) OVERRIDE {
UNUSED(graph);
DCHECK(!nodes->empty());
@@ -408,15 +418,15 @@
public:
CriticalPathSchedulingNodeSelector() : prev_select_(nullptr) {}
- SchedulingNode* PopHighestPriorityNode(ArenaVector<SchedulingNode*>* nodes,
+ SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes,
const SchedulingGraph& graph) OVERRIDE;
protected:
SchedulingNode* GetHigherPrioritySchedulingNode(SchedulingNode* candidate,
SchedulingNode* check) const;
- SchedulingNode* SelectMaterializedCondition(ArenaVector<SchedulingNode*>* nodes,
- const SchedulingGraph& graph) const;
+ SchedulingNode* SelectMaterializedCondition(ScopedArenaVector<SchedulingNode*>* nodes,
+ const SchedulingGraph& graph) const;
private:
const SchedulingNode* prev_select_;
@@ -424,16 +434,16 @@
class HScheduler {
public:
- HScheduler(ArenaAllocator* arena,
+ HScheduler(ScopedArenaAllocator* allocator,
SchedulingLatencyVisitor* latency_visitor,
SchedulingNodeSelector* selector)
- : arena_(arena),
+ : allocator_(allocator),
latency_visitor_(latency_visitor),
selector_(selector),
only_optimize_loop_blocks_(true),
- scheduling_graph_(this, arena),
+ scheduling_graph_(this, allocator),
cursor_(nullptr),
- candidates_(arena_->Adapter(kArenaAllocScheduler)) {}
+ candidates_(allocator_->Adapter(kArenaAllocScheduler)) {}
virtual ~HScheduler() {}
void Schedule(HGraph* graph);
@@ -461,7 +471,7 @@
node->SetInternalLatency(latency_visitor_->GetLastVisitedInternalLatency());
}
- ArenaAllocator* const arena_;
+ ScopedArenaAllocator* const allocator_;
SchedulingLatencyVisitor* const latency_visitor_;
SchedulingNodeSelector* const selector_;
bool only_optimize_loop_blocks_;
@@ -473,7 +483,7 @@
HInstruction* cursor_;
// The list of candidates for scheduling. A node becomes a candidate when all
// its predecessors have been scheduled.
- ArenaVector<SchedulingNode*> candidates_;
+ ScopedArenaVector<SchedulingNode*> candidates_;
private:
DISALLOW_COPY_AND_ASSIGN(HScheduler);
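The scheduler.h changes above do two related things: SchedulingNode becomes a DeletableArenaObject, and nodes_map_ stores std::unique_ptr<SchedulingNode>, so the map owns its nodes and their destructors run on Clear() even though the scoped arena releases the underlying memory in bulk. A hedged sketch of that ownership idiom, mirroring AddNode() above (`allocator` and `instr` are assumed to be in scope):

    // Simplified illustration of arena-owned scheduling nodes.
    std::unique_ptr<SchedulingNode> node(
        new (allocator) SchedulingNode(instr, allocator, /* is_scheduling_barrier */ false));
    SchedulingNode* result = node.get();  // non-owning handle returned to callers
    nodes_map_.Insert(std::make_pair(instr, std::move(node)));  // map takes ownership
    // Clearing the map now runs ~SchedulingNode() for every entry;
    // DeletableArenaObject supplies the operator delete that makes
    // unique_ptr legal on arena-placed objects.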
diff --git a/compiler/optimizing/scheduler_arm.h b/compiler/optimizing/scheduler_arm.h
index fe274d2..0cb8684 100644
--- a/compiler/optimizing/scheduler_arm.h
+++ b/compiler/optimizing/scheduler_arm.h
@@ -137,10 +137,10 @@
class HSchedulerARM : public HScheduler {
public:
- HSchedulerARM(ArenaAllocator* arena,
+ HSchedulerARM(ScopedArenaAllocator* allocator,
SchedulingNodeSelector* selector,
SchedulingLatencyVisitorARM* arm_latency_visitor)
- : HScheduler(arena, arm_latency_visitor, selector) {}
+ : HScheduler(allocator, arm_latency_visitor, selector) {}
~HSchedulerARM() OVERRIDE {}
bool IsSchedulable(const HInstruction* instruction) const OVERRIDE {
diff --git a/compiler/optimizing/scheduler_arm64.h b/compiler/optimizing/scheduler_arm64.h
index e1a80ec..32f161f 100644
--- a/compiler/optimizing/scheduler_arm64.h
+++ b/compiler/optimizing/scheduler_arm64.h
@@ -131,8 +131,8 @@
class HSchedulerARM64 : public HScheduler {
public:
- HSchedulerARM64(ArenaAllocator* arena, SchedulingNodeSelector* selector)
- : HScheduler(arena, &arm64_latency_visitor_, selector) {}
+ HSchedulerARM64(ScopedArenaAllocator* allocator, SchedulingNodeSelector* selector)
+ : HScheduler(allocator, &arm64_latency_visitor_, selector) {}
~HSchedulerARM64() OVERRIDE {}
bool IsSchedulable(const HInstruction* instruction) const OVERRIDE {
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index 0e6e0c5..dfc1633 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -71,16 +71,14 @@
return v;
}
-class SchedulerTest : public CommonCompilerTest {
+class SchedulerTest : public OptimizingUnitTest {
public:
- SchedulerTest() : pool_(), allocator_(&pool_) {
- graph_ = CreateGraph(&allocator_);
- }
+ SchedulerTest() : graph_(CreateGraph()) { }
// Build scheduling graph, and run target specific scheduling on it.
void TestBuildDependencyGraphAndSchedule(HScheduler* scheduler) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
- HBasicBlock* block1 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* block1 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->AddBlock(block1);
graph_->SetEntryBlock(entry);
@@ -100,23 +98,25 @@
// array_get2 ArrayGet [array, add1]
// array_set2 ArraySet [array, add1, add2]
- HInstruction* array = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kReference);
+ HInstruction* array = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kReference);
HInstruction* c1 = graph_->GetIntConstant(1);
HInstruction* c2 = graph_->GetIntConstant(10);
- HInstruction* add1 = new (&allocator_) HAdd(DataType::Type::kInt32, c1, c2);
- HInstruction* add2 = new (&allocator_) HAdd(DataType::Type::kInt32, add1, c2);
- HInstruction* mul = new (&allocator_) HMul(DataType::Type::kInt32, add1, add2);
- HInstruction* div_check = new (&allocator_) HDivZeroCheck(add2, 0);
- HInstruction* div = new (&allocator_) HDiv(DataType::Type::kInt32, add1, div_check, 0);
- HInstruction* array_get1 = new (&allocator_) HArrayGet(array, add1, DataType::Type::kInt32, 0);
+ HInstruction* add1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, c1, c2);
+ HInstruction* add2 = new (GetAllocator()) HAdd(DataType::Type::kInt32, add1, c2);
+ HInstruction* mul = new (GetAllocator()) HMul(DataType::Type::kInt32, add1, add2);
+ HInstruction* div_check = new (GetAllocator()) HDivZeroCheck(add2, 0);
+ HInstruction* div = new (GetAllocator()) HDiv(DataType::Type::kInt32, add1, div_check, 0);
+ HInstruction* array_get1 =
+ new (GetAllocator()) HArrayGet(array, add1, DataType::Type::kInt32, 0);
HInstruction* array_set1 =
- new (&allocator_) HArraySet(array, add1, add2, DataType::Type::kInt32, 0);
- HInstruction* array_get2 = new (&allocator_) HArrayGet(array, add1, DataType::Type::kInt32, 0);
+ new (GetAllocator()) HArraySet(array, add1, add2, DataType::Type::kInt32, 0);
+ HInstruction* array_get2 =
+ new (GetAllocator()) HArrayGet(array, add1, DataType::Type::kInt32, 0);
HInstruction* array_set2 =
- new (&allocator_) HArraySet(array, add1, add2, DataType::Type::kInt32, 0);
+ new (GetAllocator()) HArraySet(array, add1, add2, DataType::Type::kInt32, 0);
DCHECK(div_check->CanThrow());
@@ -135,18 +135,18 @@
block1->AddInstruction(instr);
}
- HEnvironment* environment = new (&allocator_) HEnvironment(&allocator_,
- 2,
- graph_->GetArtMethod(),
- 0,
- div_check);
+ HEnvironment* environment = new (GetAllocator()) HEnvironment(GetAllocator(),
+ 2,
+ graph_->GetArtMethod(),
+ 0,
+ div_check);
div_check->SetRawEnvironment(environment);
environment->SetRawEnvAt(0, add2);
add2->AddEnvUseAt(div_check->GetEnvironment(), 0);
environment->SetRawEnvAt(1, mul);
mul->AddEnvUseAt(div_check->GetEnvironment(), 1);
- SchedulingGraph scheduling_graph(scheduler, graph_->GetArena());
+ SchedulingGraph scheduling_graph(scheduler, GetScopedAllocator());
// Instructions must be inserted in reverse order into the scheduling graph.
for (HInstruction* instr : ReverseRange(block_instructions)) {
scheduling_graph.AddNode(instr);
@@ -184,7 +184,7 @@
void CompileWithRandomSchedulerAndRun(const uint16_t* data, bool has_result, int expected) {
for (CodegenTargetConfig target_config : GetTargetConfigs()) {
- HGraph* graph = CreateCFG(&allocator_, data);
+ HGraph* graph = CreateCFG(data);
// Schedule the graph randomly.
HInstructionScheduling scheduling(graph, target_config.GetInstructionSet());
@@ -198,55 +198,57 @@
}
void TestDependencyGraphOnAliasingArrayAccesses(HScheduler* scheduler) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
graph_->BuildDominatorTree();
- HInstruction* arr = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kReference);
- HInstruction* i = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(1),
- 1,
- DataType::Type::kInt32);
- HInstruction* j = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(1),
- 1,
- DataType::Type::kInt32);
- HInstruction* object = new (&allocator_) HParameterValue(graph_->GetDexFile(),
+ HInstruction* arr = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
dex::TypeIndex(0),
0,
DataType::Type::kReference);
+ HInstruction* i = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(1),
+ 1,
+ DataType::Type::kInt32);
+ HInstruction* j = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(1),
+ 1,
+ DataType::Type::kInt32);
+ HInstruction* object = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kReference);
HInstruction* c0 = graph_->GetIntConstant(0);
HInstruction* c1 = graph_->GetIntConstant(1);
- HInstruction* add0 = new (&allocator_) HAdd(DataType::Type::kInt32, i, c0);
- HInstruction* add1 = new (&allocator_) HAdd(DataType::Type::kInt32, i, c1);
- HInstruction* sub0 = new (&allocator_) HSub(DataType::Type::kInt32, i, c0);
- HInstruction* sub1 = new (&allocator_) HSub(DataType::Type::kInt32, i, c1);
- HInstruction* arr_set_0 = new (&allocator_) HArraySet(arr, c0, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set_1 = new (&allocator_) HArraySet(arr, c1, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set_i = new (&allocator_) HArraySet(arr, i, c0, DataType::Type::kInt32, 0);
+ HInstruction* add0 = new (GetAllocator()) HAdd(DataType::Type::kInt32, i, c0);
+ HInstruction* add1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, i, c1);
+ HInstruction* sub0 = new (GetAllocator()) HSub(DataType::Type::kInt32, i, c0);
+ HInstruction* sub1 = new (GetAllocator()) HSub(DataType::Type::kInt32, i, c1);
+ HInstruction* arr_set_0 =
+ new (GetAllocator()) HArraySet(arr, c0, c0, DataType::Type::kInt32, 0);
+ HInstruction* arr_set_1 =
+ new (GetAllocator()) HArraySet(arr, c1, c0, DataType::Type::kInt32, 0);
+ HInstruction* arr_set_i = new (GetAllocator()) HArraySet(arr, i, c0, DataType::Type::kInt32, 0);
HInstruction* arr_set_add0 =
- new (&allocator_) HArraySet(arr, add0, c0, DataType::Type::kInt32, 0);
+ new (GetAllocator()) HArraySet(arr, add0, c0, DataType::Type::kInt32, 0);
HInstruction* arr_set_add1 =
- new (&allocator_) HArraySet(arr, add1, c0, DataType::Type::kInt32, 0);
+ new (GetAllocator()) HArraySet(arr, add1, c0, DataType::Type::kInt32, 0);
HInstruction* arr_set_sub0 =
- new (&allocator_) HArraySet(arr, sub0, c0, DataType::Type::kInt32, 0);
+ new (GetAllocator()) HArraySet(arr, sub0, c0, DataType::Type::kInt32, 0);
HInstruction* arr_set_sub1 =
- new (&allocator_) HArraySet(arr, sub1, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set_j = new (&allocator_) HArraySet(arr, j, c0, DataType::Type::kInt32, 0);
- HInstanceFieldSet* set_field10 = new (&allocator_) HInstanceFieldSet(object,
- c1,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(10),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph_->GetDexFile(),
- 0);
+ new (GetAllocator()) HArraySet(arr, sub1, c0, DataType::Type::kInt32, 0);
+ HInstruction* arr_set_j = new (GetAllocator()) HArraySet(arr, j, c0, DataType::Type::kInt32, 0);
+ HInstanceFieldSet* set_field10 = new (GetAllocator()) HInstanceFieldSet(object,
+ c1,
+ nullptr,
+ DataType::Type::kInt32,
+ MemberOffset(10),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ 0);
HInstruction* block_instructions[] = {arr,
i,
@@ -270,7 +272,7 @@
entry->AddInstruction(instr);
}
- SchedulingGraph scheduling_graph(scheduler, graph_->GetArena());
+ SchedulingGraph scheduling_graph(scheduler, GetScopedAllocator());
HeapLocationCollector heap_location_collector(graph_);
heap_location_collector.VisitBasicBlock(entry);
heap_location_collector.BuildAliasingMatrix();
@@ -342,21 +344,19 @@
scheduler->Schedule(graph_);
}
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
};
#if defined(ART_ENABLE_CODEGEN_arm64)
TEST_F(SchedulerTest, DependencyGraphAndSchedulerARM64) {
CriticalPathSchedulingNodeSelector critical_path_selector;
- arm64::HSchedulerARM64 scheduler(&allocator_, &critical_path_selector);
+ arm64::HSchedulerARM64 scheduler(GetScopedAllocator(), &critical_path_selector);
TestBuildDependencyGraphAndSchedule(&scheduler);
}
TEST_F(SchedulerTest, ArrayAccessAliasingARM64) {
CriticalPathSchedulingNodeSelector critical_path_selector;
- arm64::HSchedulerARM64 scheduler(&allocator_, &critical_path_selector);
+ arm64::HSchedulerARM64 scheduler(GetScopedAllocator(), &critical_path_selector);
TestDependencyGraphOnAliasingArrayAccesses(&scheduler);
}
#endif
@@ -365,14 +365,14 @@
TEST_F(SchedulerTest, DependencyGraphAndSchedulerARM) {
CriticalPathSchedulingNodeSelector critical_path_selector;
arm::SchedulingLatencyVisitorARM arm_latency_visitor(/*CodeGenerator*/ nullptr);
- arm::HSchedulerARM scheduler(&allocator_, &critical_path_selector, &arm_latency_visitor);
+ arm::HSchedulerARM scheduler(GetScopedAllocator(), &critical_path_selector, &arm_latency_visitor);
TestBuildDependencyGraphAndSchedule(&scheduler);
}
TEST_F(SchedulerTest, ArrayAccessAliasingARM) {
CriticalPathSchedulingNodeSelector critical_path_selector;
arm::SchedulingLatencyVisitorARM arm_latency_visitor(/*CodeGenerator*/ nullptr);
- arm::HSchedulerARM scheduler(&allocator_, &critical_path_selector, &arm_latency_visitor);
+ arm::HSchedulerARM scheduler(GetScopedAllocator(), &critical_path_selector, &arm_latency_visitor);
TestDependencyGraphOnAliasingArrayAccesses(&scheduler);
}
#endif
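In the scheduler tests above, the SchedulingGraph is now built from the fixture's scoped allocator rather than from graph_->GetArena(). A short sketch of the resulting wiring, using the ARM64 variant and the block_instructions array constructed in the test body:

    CriticalPathSchedulingNodeSelector selector;
    arm64::HSchedulerARM64 scheduler(GetScopedAllocator(), &selector);
    SchedulingGraph scheduling_graph(&scheduler, GetScopedAllocator());
    // Instructions must be added in reverse order, as noted above.
    for (HInstruction* instr : ReverseRange(block_instructions)) {
      scheduling_graph.AddNode(instr);
    }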
diff --git a/compiler/optimizing/select_generator.cc b/compiler/optimizing/select_generator.cc
index 827b591..0e46aec 100644
--- a/compiler/optimizing/select_generator.cc
+++ b/compiler/optimizing/select_generator.cc
@@ -135,10 +135,10 @@
DCHECK(both_successors_return || phi != nullptr);
// Create the Select instruction and insert it in front of the If.
- HSelect* select = new (graph_->GetArena()) HSelect(if_instruction->InputAt(0),
- true_value,
- false_value,
- if_instruction->GetDexPc());
+ HSelect* select = new (graph_->GetAllocator()) HSelect(if_instruction->InputAt(0),
+ true_value,
+ false_value,
+ if_instruction->GetDexPc());
if (both_successors_return) {
if (true_value->GetType() == DataType::Type::kReference) {
DCHECK(false_value->GetType() == DataType::Type::kReference);
diff --git a/compiler/optimizing/side_effects_analysis.h b/compiler/optimizing/side_effects_analysis.h
index fea47e6..cf00e48 100644
--- a/compiler/optimizing/side_effects_analysis.h
+++ b/compiler/optimizing/side_effects_analysis.h
@@ -29,9 +29,9 @@
: HOptimization(graph, pass_name),
graph_(graph),
block_effects_(graph->GetBlocks().size(),
- graph->GetArena()->Adapter(kArenaAllocSideEffectsAnalysis)),
+ graph->GetAllocator()->Adapter(kArenaAllocSideEffectsAnalysis)),
loop_effects_(graph->GetBlocks().size(),
- graph->GetArena()->Adapter(kArenaAllocSideEffectsAnalysis)) {}
+ graph->GetAllocator()->Adapter(kArenaAllocSideEffectsAnalysis)) {}
SideEffects GetLoopEffects(HBasicBlock* block) const;
SideEffects GetBlockEffects(HBasicBlock* block) const;
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 2356316..f4a8a17 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -233,7 +233,7 @@
}
void SsaBuilder::RunPrimitiveTypePropagation() {
- ArenaVector<HPhi*> worklist(graph_->GetArena()->Adapter(kArenaAllocGraphBuilder));
+ ArenaVector<HPhi*> worklist(graph_->GetAllocator()->Adapter(kArenaAllocGraphBuilder));
for (HBasicBlock* block : graph_->GetReversePostOrder()) {
if (block->IsLoopHeader()) {
@@ -293,7 +293,7 @@
DCHECK(DataType::IsIntOrLongType(type));
DCHECK(FindFloatOrDoubleEquivalentOfArrayGet(aget) == nullptr);
- HArrayGet* equivalent = new (aget->GetBlock()->GetGraph()->GetArena()) HArrayGet(
+ HArrayGet* equivalent = new (aget->GetBlock()->GetGraph()->GetAllocator()) HArrayGet(
aget->GetArray(),
aget->GetIndex(),
type == DataType::Type::kInt32 ? DataType::Type::kFloat32 : DataType::Type::kFloat64,
@@ -319,7 +319,7 @@
// uses (because they are untyped) and environment uses (if --debuggable).
// After resolving all ambiguous ArrayGets, we will re-run primitive type
// propagation on the Phis which need to be updated.
- ArenaVector<HPhi*> worklist(graph_->GetArena()->Adapter(kArenaAllocGraphBuilder));
+ ArenaVector<HPhi*> worklist(graph_->GetAllocator()->Adapter(kArenaAllocGraphBuilder));
{
ScopedObjectAccess soa(Thread::Current());
@@ -566,7 +566,7 @@
HFloatConstant* result = constant->GetNext()->AsFloatConstant();
if (result == nullptr) {
float value = bit_cast<float, int32_t>(constant->GetValue());
- result = new (graph_->GetArena()) HFloatConstant(value);
+ result = new (graph_->GetAllocator()) HFloatConstant(value);
constant->GetBlock()->InsertInstructionBefore(result, constant->GetNext());
graph_->CacheFloatConstant(result);
} else {
@@ -588,7 +588,7 @@
HDoubleConstant* result = constant->GetNext()->AsDoubleConstant();
if (result == nullptr) {
double value = bit_cast<double, int64_t>(constant->GetValue());
- result = new (graph_->GetArena()) HDoubleConstant(value);
+ result = new (graph_->GetAllocator()) HDoubleConstant(value);
constant->GetBlock()->InsertInstructionBefore(result, constant->GetNext());
graph_->CacheDoubleConstant(result);
} else {
@@ -621,7 +621,7 @@
if (next == nullptr
|| (next->AsPhi()->GetRegNumber() != phi->GetRegNumber())
|| (next->GetType() != type)) {
- ArenaAllocator* allocator = graph_->GetArena();
+ ArenaAllocator* allocator = graph_->GetAllocator();
HInputsRef inputs = phi->GetInputs();
HPhi* new_phi =
new (allocator) HPhi(allocator, phi->GetRegNumber(), inputs.size(), type);
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 1819ee5..509cdc1 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -56,9 +56,9 @@
dex_cache_(dex_cache),
handles_(handles),
agets_fixed_(false),
- ambiguous_agets_(graph->GetArena()->Adapter(kArenaAllocGraphBuilder)),
- ambiguous_asets_(graph->GetArena()->Adapter(kArenaAllocGraphBuilder)),
- uninitialized_strings_(graph->GetArena()->Adapter(kArenaAllocGraphBuilder)) {
+ ambiguous_agets_(graph->GetAllocator()->Adapter(kArenaAllocGraphBuilder)),
+ ambiguous_asets_(graph->GetAllocator()->Adapter(kArenaAllocGraphBuilder)),
+ uninitialized_strings_(graph->GetAllocator()->Adapter(kArenaAllocGraphBuilder)) {
graph_->InitializeInexactObjectRTI(handles);
}
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index f1f1be2..9ab7a89 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -26,7 +26,7 @@
void SsaLivenessAnalysis::Analyze() {
// Compute the linear order directly in the graph's data structure
 // (no further graph mutations follow).
- LinearizeGraph(graph_, graph_->GetArena(), &graph_->linear_order_);
+ LinearizeGraph(graph_, &graph_->linear_order_);
// Liveness analysis.
NumberInstructions();
@@ -56,7 +56,7 @@
instructions_from_ssa_index_.push_back(current);
current->SetSsaIndex(ssa_index++);
current->SetLiveInterval(
- LiveInterval::MakeInterval(graph_->GetArena(), current->GetType(), current));
+ LiveInterval::MakeInterval(allocator_, current->GetType(), current));
}
current->SetLifetimePosition(lifetime_position);
}
@@ -74,7 +74,7 @@
instructions_from_ssa_index_.push_back(current);
current->SetSsaIndex(ssa_index++);
current->SetLiveInterval(
- LiveInterval::MakeInterval(graph_->GetArena(), current->GetType(), current));
+ LiveInterval::MakeInterval(allocator_, current->GetType(), current));
}
instructions_from_lifetime_position_.push_back(current);
current->SetLifetimePosition(lifetime_position);
@@ -89,7 +89,7 @@
void SsaLivenessAnalysis::ComputeLiveness() {
for (HBasicBlock* block : graph_->GetLinearOrder()) {
block_infos_[block->GetBlockId()] =
- new (graph_->GetArena()) BlockInfo(graph_->GetArena(), *block, number_of_ssa_values_);
+ new (allocator_) BlockInfo(allocator_, *block, number_of_ssa_values_);
}
// Compute the live ranges, as well as the initial live_in, live_out, and kill sets.
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index ec4ab31..9800af7 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -20,6 +20,8 @@
#include <iostream>
#include "base/iteration_range.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "nodes.h"
#include "utils/intrusive_forward_list.h"
@@ -32,7 +34,7 @@
class BlockInfo : public ArenaObject<kArenaAllocSsaLiveness> {
public:
- BlockInfo(ArenaAllocator* allocator, const HBasicBlock& block, size_t number_of_ssa_values)
+ BlockInfo(ScopedArenaAllocator* allocator, const HBasicBlock& block, size_t number_of_ssa_values)
: block_(block),
live_in_(allocator, number_of_ssa_values, false, kArenaAllocSsaLiveness),
live_out_(allocator, number_of_ssa_values, false, kArenaAllocSsaLiveness),
@@ -82,7 +84,7 @@
stream << "[" << start_ << "," << end_ << ")";
}
- LiveRange* Dup(ArenaAllocator* allocator) const {
+ LiveRange* Dup(ScopedArenaAllocator* allocator) const {
return new (allocator) LiveRange(
start_, end_, next_ == nullptr ? nullptr : next_->Dup(allocator));
}
@@ -135,7 +137,7 @@
return user_->GetBlock()->GetLoopInformation();
}
- UsePosition* Clone(ArenaAllocator* allocator) const {
+ UsePosition* Clone(ScopedArenaAllocator* allocator) const {
return new (allocator) UsePosition(user_, input_index_, position_);
}
@@ -180,7 +182,7 @@
stream << position_;
}
- EnvUsePosition* Clone(ArenaAllocator* allocator) const {
+ EnvUsePosition* Clone(ScopedArenaAllocator* allocator) const {
return new (allocator) EnvUsePosition(environment_, input_index_, position_);
}
@@ -261,17 +263,19 @@
*/
class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
public:
- static LiveInterval* MakeInterval(ArenaAllocator* allocator,
+ static LiveInterval* MakeInterval(ScopedArenaAllocator* allocator,
DataType::Type type,
HInstruction* instruction = nullptr) {
return new (allocator) LiveInterval(allocator, type, instruction);
}
- static LiveInterval* MakeFixedInterval(ArenaAllocator* allocator, int reg, DataType::Type type) {
+ static LiveInterval* MakeFixedInterval(ScopedArenaAllocator* allocator,
+ int reg,
+ DataType::Type type) {
return new (allocator) LiveInterval(allocator, type, nullptr, true, reg, false);
}
- static LiveInterval* MakeTempInterval(ArenaAllocator* allocator, DataType::Type type) {
+ static LiveInterval* MakeTempInterval(ScopedArenaAllocator* allocator, DataType::Type type) {
return new (allocator) LiveInterval(allocator, type, nullptr, false, kNoRegister, true);
}
@@ -969,7 +973,7 @@
}
private:
- LiveInterval(ArenaAllocator* allocator,
+ LiveInterval(ScopedArenaAllocator* allocator,
DataType::Type type,
HInstruction* defined_by = nullptr,
bool is_fixed = false,
@@ -1082,7 +1086,7 @@
}
}
- ArenaAllocator* const allocator_;
+ ScopedArenaAllocator* const allocator_;
 // Ranges of this interval. We need quick access to the last range to test
// for liveness (see `IsDeadAt`).
@@ -1158,14 +1162,15 @@
*/
class SsaLivenessAnalysis : public ValueObject {
public:
- SsaLivenessAnalysis(HGraph* graph, CodeGenerator* codegen)
+ SsaLivenessAnalysis(HGraph* graph, CodeGenerator* codegen, ScopedArenaAllocator* allocator)
: graph_(graph),
codegen_(codegen),
+ allocator_(allocator),
block_infos_(graph->GetBlocks().size(),
nullptr,
- graph->GetArena()->Adapter(kArenaAllocSsaLiveness)),
- instructions_from_ssa_index_(graph->GetArena()->Adapter(kArenaAllocSsaLiveness)),
- instructions_from_lifetime_position_(graph->GetArena()->Adapter(kArenaAllocSsaLiveness)),
+ allocator_->Adapter(kArenaAllocSsaLiveness)),
+ instructions_from_ssa_index_(allocator_->Adapter(kArenaAllocSsaLiveness)),
+ instructions_from_lifetime_position_(allocator_->Adapter(kArenaAllocSsaLiveness)),
number_of_ssa_values_(0) {
}
@@ -1284,13 +1289,18 @@
HGraph* const graph_;
CodeGenerator* const codegen_;
- ArenaVector<BlockInfo*> block_infos_;
+
+ // Use a local ScopedArenaAllocator for allocating memory.
+ // This allocator must remain alive while doing register allocation.
+ ScopedArenaAllocator* allocator_;
+
+ ScopedArenaVector<BlockInfo*> block_infos_;
// Temporary array used when computing live_in, live_out, and kill sets.
- ArenaVector<HInstruction*> instructions_from_ssa_index_;
+ ScopedArenaVector<HInstruction*> instructions_from_ssa_index_;
// Temporary array used when inserting moves in the graph.
- ArenaVector<HInstruction*> instructions_from_lifetime_position_;
+ ScopedArenaVector<HInstruction*> instructions_from_lifetime_position_;
size_t number_of_ssa_values_;
ART_FRIEND_TEST(RegisterAllocatorTest, SpillInactive);
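With the extra constructor parameter above, every SsaLivenessAnalysis caller now supplies the scoped allocator and must keep it alive through register allocation, as the new member comment says. A usage sketch consistent with the call sites in this change (graph construction elided; `codegen` is assumed to be a CodeGenerator*):

    // The allocator must outlive AllocateRegisters().
    ScopedArenaAllocator allocator(graph->GetArenaStack());
    SsaLivenessAnalysis liveness(graph, codegen, &allocator);
    liveness.Analyze();
    std::unique_ptr<RegisterAllocator> register_allocator =
        RegisterAllocator::Create(&allocator, codegen, liveness, strategy);
    register_allocator->AllocateRegisters();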
diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc
index e89bf6d..9b78e0e 100644
--- a/compiler/optimizing/ssa_liveness_analysis_test.cc
+++ b/compiler/optimizing/ssa_liveness_analysis_test.cc
@@ -27,12 +27,10 @@
namespace art {
-class SsaLivenessAnalysisTest : public testing::Test {
+class SsaLivenessAnalysisTest : public OptimizingUnitTest {
public:
SsaLivenessAnalysisTest()
- : pool_(),
- allocator_(&pool_),
- graph_(CreateGraph(&allocator_)),
+ : graph_(CreateGraph()),
compiler_options_(),
instruction_set_(kRuntimeISA) {
std::string error_msg;
@@ -44,7 +42,7 @@
compiler_options_);
CHECK(codegen_ != nullptr) << instruction_set_ << " is not a supported target architecture.";
// Create entry block.
- entry_ = new (&allocator_) HBasicBlock(graph_);
+ entry_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry_);
graph_->SetEntryBlock(entry_);
}
@@ -52,14 +50,12 @@
protected:
HBasicBlock* CreateSuccessor(HBasicBlock* block) {
HGraph* graph = block->GetGraph();
- HBasicBlock* successor = new (&allocator_) HBasicBlock(graph);
+ HBasicBlock* successor = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(successor);
block->AddSuccessor(successor);
return successor;
}
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
CompilerOptions compiler_options_;
InstructionSet instruction_set_;
@@ -69,17 +65,17 @@
};
TEST_F(SsaLivenessAnalysisTest, TestReturnArg) {
- HInstruction* arg = new (&allocator_) HParameterValue(
+ HInstruction* arg = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
entry_->AddInstruction(arg);
HBasicBlock* block = CreateSuccessor(entry_);
- HInstruction* ret = new (&allocator_) HReturn(arg);
+ HInstruction* ret = new (GetAllocator()) HReturn(arg);
block->AddInstruction(ret);
- block->AddInstruction(new (&allocator_) HExit());
+ block->AddInstruction(new (GetAllocator()) HExit());
graph_->BuildDominatorTree();
- SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get());
+ SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get(), GetScopedAllocator());
ssa_analysis.Analyze();
std::ostringstream arg_dump;
@@ -89,49 +85,49 @@
}
TEST_F(SsaLivenessAnalysisTest, TestAput) {
- HInstruction* array = new (&allocator_) HParameterValue(
+ HInstruction* array = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
- HInstruction* index = new (&allocator_) HParameterValue(
+ HInstruction* index = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32);
- HInstruction* value = new (&allocator_) HParameterValue(
+ HInstruction* value = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(2), 2, DataType::Type::kInt32);
- HInstruction* extra_arg1 = new (&allocator_) HParameterValue(
+ HInstruction* extra_arg1 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(3), 3, DataType::Type::kInt32);
- HInstruction* extra_arg2 = new (&allocator_) HParameterValue(
+ HInstruction* extra_arg2 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(4), 4, DataType::Type::kReference);
ArenaVector<HInstruction*> args({ array, index, value, extra_arg1, extra_arg2 },
- allocator_.Adapter());
+ GetAllocator()->Adapter());
for (HInstruction* insn : args) {
entry_->AddInstruction(insn);
}
HBasicBlock* block = CreateSuccessor(entry_);
- HInstruction* null_check = new (&allocator_) HNullCheck(array, 0);
+ HInstruction* null_check = new (GetAllocator()) HNullCheck(array, 0);
block->AddInstruction(null_check);
- HEnvironment* null_check_env = new (&allocator_) HEnvironment(&allocator_,
- /* number_of_vregs */ 5,
- /* method */ nullptr,
- /* dex_pc */ 0u,
- null_check);
+ HEnvironment* null_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
+ /* number_of_vregs */ 5,
+ /* method */ nullptr,
+ /* dex_pc */ 0u,
+ null_check);
null_check_env->CopyFrom(args);
null_check->SetRawEnvironment(null_check_env);
- HInstruction* length = new (&allocator_) HArrayLength(array, 0);
+ HInstruction* length = new (GetAllocator()) HArrayLength(array, 0);
block->AddInstruction(length);
- HInstruction* bounds_check = new (&allocator_) HBoundsCheck(index, length, /* dex_pc */ 0u);
+ HInstruction* bounds_check = new (GetAllocator()) HBoundsCheck(index, length, /* dex_pc */ 0u);
block->AddInstruction(bounds_check);
- HEnvironment* bounds_check_env = new (&allocator_) HEnvironment(&allocator_,
- /* number_of_vregs */ 5,
- /* method */ nullptr,
- /* dex_pc */ 0u,
- bounds_check);
+ HEnvironment* bounds_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
+ /* number_of_vregs */ 5,
+ /* method */ nullptr,
+ /* dex_pc */ 0u,
+ bounds_check);
bounds_check_env->CopyFrom(args);
bounds_check->SetRawEnvironment(bounds_check_env);
HInstruction* array_set =
- new (&allocator_) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
+ new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
block->AddInstruction(array_set);
graph_->BuildDominatorTree();
- SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get());
+ SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get(), GetScopedAllocator());
ssa_analysis.Analyze();
EXPECT_FALSE(graph_->IsDebuggable());
@@ -159,53 +155,53 @@
}
TEST_F(SsaLivenessAnalysisTest, TestDeoptimize) {
- HInstruction* array = new (&allocator_) HParameterValue(
+ HInstruction* array = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
- HInstruction* index = new (&allocator_) HParameterValue(
+ HInstruction* index = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32);
- HInstruction* value = new (&allocator_) HParameterValue(
+ HInstruction* value = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(2), 2, DataType::Type::kInt32);
- HInstruction* extra_arg1 = new (&allocator_) HParameterValue(
+ HInstruction* extra_arg1 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(3), 3, DataType::Type::kInt32);
- HInstruction* extra_arg2 = new (&allocator_) HParameterValue(
+ HInstruction* extra_arg2 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(4), 4, DataType::Type::kReference);
ArenaVector<HInstruction*> args({ array, index, value, extra_arg1, extra_arg2 },
- allocator_.Adapter());
+ GetAllocator()->Adapter());
for (HInstruction* insn : args) {
entry_->AddInstruction(insn);
}
HBasicBlock* block = CreateSuccessor(entry_);
- HInstruction* null_check = new (&allocator_) HNullCheck(array, 0);
+ HInstruction* null_check = new (GetAllocator()) HNullCheck(array, 0);
block->AddInstruction(null_check);
- HEnvironment* null_check_env = new (&allocator_) HEnvironment(&allocator_,
- /* number_of_vregs */ 5,
- /* method */ nullptr,
- /* dex_pc */ 0u,
- null_check);
+ HEnvironment* null_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
+ /* number_of_vregs */ 5,
+ /* method */ nullptr,
+ /* dex_pc */ 0u,
+ null_check);
null_check_env->CopyFrom(args);
null_check->SetRawEnvironment(null_check_env);
- HInstruction* length = new (&allocator_) HArrayLength(array, 0);
+ HInstruction* length = new (GetAllocator()) HArrayLength(array, 0);
block->AddInstruction(length);
// Use HAboveOrEqual+HDeoptimize as the bounds check.
- HInstruction* ae = new (&allocator_) HAboveOrEqual(index, length);
+ HInstruction* ae = new (GetAllocator()) HAboveOrEqual(index, length);
block->AddInstruction(ae);
- HInstruction* deoptimize =
- new(&allocator_) HDeoptimize(&allocator_, ae, DeoptimizationKind::kBlockBCE, /* dex_pc */ 0u);
+ HInstruction* deoptimize = new(GetAllocator()) HDeoptimize(
+ GetAllocator(), ae, DeoptimizationKind::kBlockBCE, /* dex_pc */ 0u);
block->AddInstruction(deoptimize);
- HEnvironment* deoptimize_env = new (&allocator_) HEnvironment(&allocator_,
- /* number_of_vregs */ 5,
- /* method */ nullptr,
- /* dex_pc */ 0u,
- deoptimize);
+ HEnvironment* deoptimize_env = new (GetAllocator()) HEnvironment(GetAllocator(),
+ /* number_of_vregs */ 5,
+ /* method */ nullptr,
+ /* dex_pc */ 0u,
+ deoptimize);
deoptimize_env->CopyFrom(args);
deoptimize->SetRawEnvironment(deoptimize_env);
HInstruction* array_set =
- new (&allocator_) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
+ new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
block->AddInstruction(array_set);
graph_->BuildDominatorTree();
- SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get());
+ SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get(), GetScopedAllocator());
ssa_analysis.Analyze();
EXPECT_FALSE(graph_->IsDebuggable());
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index b4f8408..3b95b86 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -31,7 +31,7 @@
// Phis are constructed live and should not be revived if previously marked
// dead. This algorithm temporarily breaks that invariant but we DCHECK that
// only phis which were initially live are revived.
- ArenaSet<HPhi*> initially_live(graph_->GetArena()->Adapter(kArenaAllocSsaPhiElimination));
+ ArenaSet<HPhi*> initially_live(graph_->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination));
// Add to the worklist phis referenced by non-phi instructions.
for (HBasicBlock* block : graph_->GetReversePostOrder()) {
@@ -123,11 +123,11 @@
}
}
- ArenaBitVector visited_phis_in_cycle(graph_->GetArena(),
+ ArenaBitVector visited_phis_in_cycle(graph_->GetAllocator(),
graph_->GetCurrentInstructionId(),
/* expandable */ false,
kArenaAllocSsaPhiElimination);
- ArenaVector<HPhi*> cycle_worklist(graph_->GetArena()->Adapter(kArenaAllocSsaPhiElimination));
+ ArenaVector<HPhi*> cycle_worklist(graph_->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination));
while (!worklist_.empty()) {
HPhi* phi = worklist_.back();
diff --git a/compiler/optimizing/ssa_phi_elimination.h b/compiler/optimizing/ssa_phi_elimination.h
index b48e820..e0cde07 100644
--- a/compiler/optimizing/ssa_phi_elimination.h
+++ b/compiler/optimizing/ssa_phi_elimination.h
@@ -31,7 +31,7 @@
public:
explicit SsaDeadPhiElimination(HGraph* graph)
: HOptimization(graph, kSsaDeadPhiEliminationPassName),
- worklist_(graph->GetArena()->Adapter(kArenaAllocSsaPhiElimination)) {
+ worklist_(graph->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination)) {
worklist_.reserve(kDefaultWorklistSize);
}
@@ -60,7 +60,7 @@
public:
explicit SsaRedundantPhiElimination(HGraph* graph)
: HOptimization(graph, kSsaRedundantPhiEliminationPassName),
- worklist_(graph->GetArena()->Adapter(kArenaAllocSsaPhiElimination)) {
+ worklist_(graph->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination)) {
worklist_.reserve(kDefaultWorklistSize);
}
diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc
index ac998db..e08904e 100644
--- a/compiler/optimizing/ssa_test.cc
+++ b/compiler/optimizing/ssa_test.cc
@@ -29,7 +29,10 @@
namespace art {
-class SsaTest : public CommonCompilerTest {};
+class SsaTest : public OptimizingUnitTest {
+ protected:
+ void TestCode(const uint16_t* data, const char* expected);
+};
class SsaPrettyPrinter : public HPrettyPrinter {
public:
@@ -77,10 +80,8 @@
}
}
-static void TestCode(const uint16_t* data, const char* expected) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+void SsaTest::TestCode(const uint16_t* data, const char* expected) {
+ HGraph* graph = CreateCFG(data);
 // The suspend check implementation may change in the future, and this test relies
// on how instructions are ordered.
RemoveSuspendChecks(graph);
diff --git a/compiler/optimizing/suspend_check_test.cc b/compiler/optimizing/suspend_check_test.cc
index 15cd4e8..88336b0 100644
--- a/compiler/optimizing/suspend_check_test.cc
+++ b/compiler/optimizing/suspend_check_test.cc
@@ -28,10 +28,13 @@
* Check that the HGraphBuilder adds suspend checks to backward branches.
*/
-static void TestCode(const uint16_t* data) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+class SuspendCheckTest : public OptimizingUnitTest {
+ protected:
+ void TestCode(const uint16_t* data);
+};
+
+void SuspendCheckTest::TestCode(const uint16_t* data) {
+ HGraph* graph = CreateCFG(data);
HBasicBlock* first_block = graph->GetEntryBlock()->GetSingleSuccessor();
HBasicBlock* loop_header = first_block->GetSingleSuccessor();
ASSERT_TRUE(loop_header->IsLoopHeader());
@@ -39,8 +42,6 @@
ASSERT_TRUE(loop_header->GetFirstInstruction()->IsSuspendCheck());
}
-class SuspendCheckTest : public CommonCompilerTest {};
-
TEST_F(SuspendCheckTest, CFG1) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::NOP,
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index 70f290d..1e9a521 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -57,11 +57,11 @@
#endif
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
- ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset32 offset) {
+ ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset32 offset) {
using vixl::aarch32::MemOperand;
using vixl::aarch32::pc;
using vixl::aarch32::r0;
- ArmVIXLAssembler assembler(arena);
+ ArmVIXLAssembler assembler(allocator);
switch (abi) {
case kInterpreterAbi: // Thread* is first argument (R0) in interpreter ABI.
@@ -98,8 +98,8 @@
#ifdef ART_ENABLE_CODEGEN_arm64
namespace arm64 {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
- ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset64 offset) {
- Arm64Assembler assembler(arena);
+ ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset64 offset) {
+ Arm64Assembler assembler(allocator);
switch (abi) {
case kInterpreterAbi: // Thread* is first argument (X0) in interpreter ABI.
@@ -137,8 +137,8 @@
#ifdef ART_ENABLE_CODEGEN_mips
namespace mips {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
- ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset32 offset) {
- MipsAssembler assembler(arena);
+ ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset32 offset) {
+ MipsAssembler assembler(allocator);
switch (abi) {
case kInterpreterAbi: // Thread* is first argument (A0) in interpreter ABI.
@@ -169,8 +169,8 @@
#ifdef ART_ENABLE_CODEGEN_mips64
namespace mips64 {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
- ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset64 offset) {
- Mips64Assembler assembler(arena);
+ ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset64 offset) {
+ Mips64Assembler assembler(allocator);
switch (abi) {
case kInterpreterAbi: // Thread* is first argument (A0) in interpreter ABI.
@@ -200,9 +200,9 @@
#ifdef ART_ENABLE_CODEGEN_x86
namespace x86 {
-static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* arena,
+static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* allocator,
ThreadOffset32 offset) {
- X86Assembler assembler(arena);
+ X86Assembler assembler(allocator);
// All x86 trampolines call via the Thread* held in fs.
__ fs()->jmp(Address::Absolute(offset));
@@ -221,9 +221,9 @@
#ifdef ART_ENABLE_CODEGEN_x86_64
namespace x86_64 {
-static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* arena,
+static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* allocator,
ThreadOffset64 offset) {
- x86_64::X86_64Assembler assembler(arena);
+ x86_64::X86_64Assembler assembler(allocator);
// All x86 trampolines call via the Thread* held in gs.
__ gs()->jmp(x86_64::Address::Absolute(offset, true));
diff --git a/compiler/utils/arm/assembler_arm_vixl.h b/compiler/utils/arm/assembler_arm_vixl.h
index 9c11fd3..0e73e6b 100644
--- a/compiler/utils/arm/assembler_arm_vixl.h
+++ b/compiler/utils/arm/assembler_arm_vixl.h
@@ -151,8 +151,8 @@
private:
class ArmException;
public:
- explicit ArmVIXLAssembler(ArenaAllocator* arena)
- : Assembler(arena) {
+ explicit ArmVIXLAssembler(ArenaAllocator* allocator)
+ : Assembler(allocator) {
// Use Thumb2 instruction set.
vixl_masm_.UseT32();
}
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index ed57ca6..0bae4d4 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -117,7 +117,8 @@
}
void ArmVIXLJNIMacroAssembler::RemoveFrame(size_t frame_size,
- ArrayRef<const ManagedRegister> callee_save_regs) {
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ bool may_suspend) {
CHECK_ALIGNED(frame_size, kStackAlignment);
cfi().RememberState();
@@ -152,9 +153,33 @@
___ Pop(RegisterList(core_spill_mask));
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
- // Refresh Mark Register.
- // TODO: Refresh MR only if suspend is taken.
- ___ Ldr(mr, MemOperand(tr, Thread::IsGcMarkingOffset<kArmPointerSize>().Int32Value()));
+ if (may_suspend) {
+ // The method may be suspended; refresh the Marking Register.
+ ___ Ldr(mr, MemOperand(tr, Thread::IsGcMarkingOffset<kArmPointerSize>().Int32Value()));
+ } else {
+ // The method shall not be suspended; no need to refresh the Marking Register.
+
+ // Check that the Marking Register is a callee-save register,
+ // and thus has been preserved by native code following the
+ // AAPCS calling convention.
+ DCHECK_NE(core_spill_mask & (1 << MR), 0)
+ << "core_spill_mask should contain Marking Register R" << MR;
+
+ // The following condition is a compile-time one, so it does not have a run-time cost.
+ if (kIsDebugBuild) {
+ // The following condition is a run-time one; it is executed after the
+ // previous compile-time test, to avoid penalizing non-debug builds.
+ if (emit_run_time_checks_in_debug_mode_) {
+ // Emit a run-time check verifying that the Marking Register is up-to-date.
+ UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ // Ensure we are not clobbering a callee-save register that was restored before.
+ DCHECK_EQ(core_spill_mask & (1 << temp.GetCode()), 0)
+ << "core_spill_mask hould not contain scratch register R" << temp.GetCode();
+ asm_.GenerateMarkingRegisterCheck(temp);
+ }
+ }
+ }
}
// Return to LR.
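The DCHECK above relies on a plain bit test: a core register is in the spill mask iff its bit is set. A standalone sketch of that membership test (the register number 8 for MR is an illustrative assumption, not taken from this change):

    #include <cstdint>

    constexpr uint32_t MR = 8;  // assumed Marking Register number, for illustration
    inline bool SpillMaskContains(uint32_t core_spill_mask, uint32_t reg) {
      // Register `reg` was pushed/popped by the frame code iff its bit is set.
      return (core_spill_mask & (1u << reg)) != 0;
    }
    // Mirrors the DCHECK: SpillMaskContains(core_spill_mask, MR) must hold
    // whenever may_suspend is false.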
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
index f3baf1f..e239004 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
@@ -35,9 +35,9 @@
private:
class ArmException;
public:
- explicit ArmVIXLJNIMacroAssembler(ArenaAllocator* arena)
- : JNIMacroAssemblerFwd(arena),
- exception_blocks_(arena->Adapter(kArenaAllocAssembler)) {}
+ explicit ArmVIXLJNIMacroAssembler(ArenaAllocator* allocator)
+ : JNIMacroAssemblerFwd(allocator),
+ exception_blocks_(allocator->Adapter(kArenaAllocAssembler)) {}
virtual ~ArmVIXLJNIMacroAssembler() {}
void FinalizeCode() OVERRIDE;
@@ -54,7 +54,8 @@
// Emit code that will remove an activation from the stack.
void RemoveFrame(size_t frame_size,
- ArrayRef<const ManagedRegister> callee_save_regs) OVERRIDE;
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ bool may_suspend) OVERRIDE;
void IncreaseFrameSize(size_t adjust) OVERRIDE;
void DecreaseFrameSize(size_t adjust) OVERRIDE;
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 6b28363..e5ec24a 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -61,7 +61,7 @@
class Arm64Assembler FINAL : public Assembler {
public:
- explicit Arm64Assembler(ArenaAllocator* arena) : Assembler(arena) {}
+ explicit Arm64Assembler(ArenaAllocator* allocator) : Assembler(allocator) {}
virtual ~Arm64Assembler() {}
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.cc b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
index 9732b76..573bb6d 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.cc
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
@@ -743,7 +743,8 @@
}
void Arm64JNIMacroAssembler::RemoveFrame(size_t frame_size,
- ArrayRef<const ManagedRegister> callee_save_regs) {
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ bool may_suspend) {
// Setup VIXL CPURegList for callee-saves.
CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
@@ -773,10 +774,36 @@
asm_.UnspillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
- // Refresh Mark Register.
- // TODO: Refresh MR only if suspend is taken.
- ___ Ldr(reg_w(MR),
- MemOperand(reg_x(TR), Thread::IsGcMarkingOffset<kArm64PointerSize>().Int32Value()));
+ vixl::aarch64::Register mr = reg_x(MR); // Marking Register.
+ vixl::aarch64::Register tr = reg_x(TR); // Thread Register.
+
+ if (may_suspend) {
+ // The method may be suspended; refresh the Marking Register.
+ ___ Ldr(mr.W(), MemOperand(tr, Thread::IsGcMarkingOffset<kArm64PointerSize>().Int32Value()));
+ } else {
+ // The method shall not be suspended; no need to refresh the Marking Register.
+
+ // Check that the Marking Register is a callee-save register,
+ // and thus has been preserved by native code following the
+ // AAPCS64 calling convention.
+ DCHECK(core_reg_list.IncludesAliasOf(mr))
+ << "core_reg_list should contain Marking Register X" << mr.GetCode();
+
+ // The following condition is a compile-time one, so it does not have a run-time cost.
+ if (kIsDebugBuild) {
+ // The following condition is a run-time one; it is executed after the
+ // previous compile-time test, to avoid penalizing non-debug builds.
+ if (emit_run_time_checks_in_debug_mode_) {
+ // Emit a run-time check verifying that the Marking Register is up-to-date.
+ UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+ Register temp = temps.AcquireW();
+ // Ensure we are not clobbering a callee-save register that was restored before.
+ DCHECK(!core_reg_list.IncludesAliasOf(temp.X()))
+ << "core_reg_list should not contain scratch register X" << temp.GetCode();
+ asm_.GenerateMarkingRegisterCheck(temp);
+ }
+ }
+ }
}
// Decrease frame size to start of callee saved regs.
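The nested `kIsDebugBuild` / `emit_run_time_checks_in_debug_mode_` test above is a general pattern: the outer condition is a compile-time constant, so release builds drop the whole block, while the inner flag keeps debug builds configurable at run time. A minimal self-contained sketch of the shape (names shortened; not ART code):

    constexpr bool kIsDebugBuild = true;   // compile-time, fixed per build type
    bool emit_run_time_checks = false;     // run-time toggle

    void MaybeEmitMarkingRegisterCheck() {
      if (kIsDebugBuild) {                 // folded away entirely when false
        if (emit_run_time_checks) {        // only ever evaluated in debug builds
          // ... emit the Marking Register consistency check ...
        }
      }
    }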
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h
index baf0434..fda87aa 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.h
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h
@@ -40,9 +40,9 @@
class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler, PointerSize::k64> {
public:
- explicit Arm64JNIMacroAssembler(ArenaAllocator* arena)
- : JNIMacroAssemblerFwd(arena),
- exception_blocks_(arena->Adapter(kArenaAllocAssembler)) {}
+ explicit Arm64JNIMacroAssembler(ArenaAllocator* allocator)
+ : JNIMacroAssemblerFwd(allocator),
+ exception_blocks_(allocator->Adapter(kArenaAllocAssembler)) {}
~Arm64JNIMacroAssembler();
@@ -56,8 +56,9 @@
const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
// Emit code that will remove an activation from the stack.
- void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
- OVERRIDE;
+ void RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ bool may_suspend) OVERRIDE;
void IncreaseFrameSize(size_t adjust) OVERRIDE;
void DecreaseFrameSize(size_t adjust) OVERRIDE;
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index 25eca23..944c64b 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -25,10 +25,10 @@
namespace art {
-AssemblerBuffer::AssemblerBuffer(ArenaAllocator* arena)
- : arena_(arena) {
+AssemblerBuffer::AssemblerBuffer(ArenaAllocator* allocator)
+ : allocator_(allocator) {
static const size_t kInitialBufferCapacity = 4 * KB;
- contents_ = arena_->AllocArray<uint8_t>(kInitialBufferCapacity, kArenaAllocAssembler);
+ contents_ = allocator_->AllocArray<uint8_t>(kInitialBufferCapacity, kArenaAllocAssembler);
cursor_ = contents_;
limit_ = ComputeLimit(contents_, kInitialBufferCapacity);
fixup_ = nullptr;
@@ -45,8 +45,8 @@
AssemblerBuffer::~AssemblerBuffer() {
- if (arena_->IsRunningOnMemoryTool()) {
- arena_->MakeInaccessible(contents_, Capacity());
+ if (allocator_->IsRunningOnMemoryTool()) {
+ allocator_->MakeInaccessible(contents_, Capacity());
}
}
@@ -81,7 +81,7 @@
// Allocate the new data area and copy contents of the old one to it.
contents_ = reinterpret_cast<uint8_t*>(
- arena_->Realloc(contents_, old_capacity, new_capacity, kArenaAllocAssembler));
+ allocator_->Realloc(contents_, old_capacity, new_capacity, kArenaAllocAssembler));
// Update the cursor and recompute the limit.
cursor_ = contents_ + old_size;
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 314ff8c..dbd35ab 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -89,11 +89,11 @@
class AssemblerBuffer {
public:
- explicit AssemblerBuffer(ArenaAllocator* arena);
+ explicit AssemblerBuffer(ArenaAllocator* allocator);
~AssemblerBuffer();
- ArenaAllocator* GetArena() {
- return arena_;
+ ArenaAllocator* GetAllocator() {
+ return allocator_;
}
// Basic support for emitting, loading, and storing.
@@ -252,7 +252,7 @@
// for a single, fast space check per instruction.
static const int kMinimumGap = 32;
- ArenaAllocator* arena_;
+ ArenaAllocator* allocator_;
uint8_t* contents_;
uint8_t* cursor_;
uint8_t* limit_;
@@ -392,8 +392,8 @@
*/
DebugFrameOpCodeWriterForAssembler& cfi() { return cfi_; }
- ArenaAllocator* GetArena() {
- return buffer_.GetArena();
+ ArenaAllocator* GetAllocator() {
+ return buffer_.GetAllocator();
}
AssemblerBuffer* GetBuffer() {
@@ -401,7 +401,7 @@
}
protected:
- explicit Assembler(ArenaAllocator* arena) : buffer_(arena), cfi_(this) {}
+ explicit Assembler(ArenaAllocator* allocator) : buffer_(allocator), cfi_(this) {}
AssemblerBuffer buffer_;
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index 227954e..11a9b91 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -741,8 +741,8 @@
}
// Override this to set up any architecture-specific things, e.g., CPU revision.
- virtual Ass* CreateAssembler(ArenaAllocator* arena) {
- return new (arena) Ass(arena);
+ virtual Ass* CreateAssembler(ArenaAllocator* allocator) {
+ return new (allocator) Ass(allocator);
}
// Override this to set up any architecture-specific things, e.g., register vectors.
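The `new (allocator) Ass(allocator)` form works because arena-allocated types provide a placement-style `operator new` overload taking the allocator; a sketch of that overload's shape (the real one lives in ART's arena object helpers, so treat this as illustrative, assuming ArenaAllocator and kArenaAllocAssembler are in scope):

    class ArenaAllocatedExample {
     public:
      // Routes `new (allocator) ArenaAllocatedExample(...)` into the arena.
      void* operator new(size_t size, ArenaAllocator* allocator) {
        return allocator->Alloc(size, kArenaAllocAssembler);
      }
    };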
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 4dbe71b..5622f89 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -285,7 +285,7 @@
__ DecreaseFrameSize(4096);
__ DecreaseFrameSize(32);
- __ RemoveFrame(frame_size, callee_save_regs);
+ __ RemoveFrame(frame_size, callee_save_regs, /* may_suspend */ true);
EmitAndCheck(&assembler, "VixlJniHelpers");
}
diff --git a/compiler/utils/jni_macro_assembler.cc b/compiler/utils/jni_macro_assembler.cc
index 3ac6c3c..0616b35 100644
--- a/compiler/utils/jni_macro_assembler.cc
+++ b/compiler/utils/jni_macro_assembler.cc
@@ -47,7 +47,7 @@
template <>
MacroAsm32UniquePtr JNIMacroAssembler<PointerSize::k32>::Create(
- ArenaAllocator* arena,
+ ArenaAllocator* allocator,
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features) {
#ifndef ART_ENABLE_CODEGEN_mips
@@ -58,19 +58,19 @@
#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
case kThumb2:
- return MacroAsm32UniquePtr(new (arena) arm::ArmVIXLJNIMacroAssembler(arena));
+ return MacroAsm32UniquePtr(new (allocator) arm::ArmVIXLJNIMacroAssembler(allocator));
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case kMips:
- return MacroAsm32UniquePtr(new (arena) mips::MipsAssembler(
- arena,
+ return MacroAsm32UniquePtr(new (allocator) mips::MipsAssembler(
+ allocator,
instruction_set_features != nullptr
? instruction_set_features->AsMipsInstructionSetFeatures()
: nullptr));
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case kX86:
- return MacroAsm32UniquePtr(new (arena) x86::X86JNIMacroAssembler(arena));
+ return MacroAsm32UniquePtr(new (allocator) x86::X86JNIMacroAssembler(allocator));
#endif
default:
LOG(FATAL) << "Unknown/unsupported 4B InstructionSet: " << instruction_set;
@@ -82,7 +82,7 @@
template <>
MacroAsm64UniquePtr JNIMacroAssembler<PointerSize::k64>::Create(
- ArenaAllocator* arena,
+ ArenaAllocator* allocator,
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features) {
#ifndef ART_ENABLE_CODEGEN_mips64
@@ -92,22 +92,22 @@
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64:
- return MacroAsm64UniquePtr(new (arena) arm64::Arm64JNIMacroAssembler(arena));
+ return MacroAsm64UniquePtr(new (allocator) arm64::Arm64JNIMacroAssembler(allocator));
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64:
- return MacroAsm64UniquePtr(new (arena) mips64::Mips64Assembler(
- arena,
+ return MacroAsm64UniquePtr(new (allocator) mips64::Mips64Assembler(
+ allocator,
instruction_set_features != nullptr
? instruction_set_features->AsMips64InstructionSetFeatures()
: nullptr));
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64:
- return MacroAsm64UniquePtr(new (arena) x86_64::X86_64JNIMacroAssembler(arena));
+ return MacroAsm64UniquePtr(new (allocator) x86_64::X86_64JNIMacroAssembler(allocator));
#endif
default:
- UNUSED(arena);
+ UNUSED(allocator);
LOG(FATAL) << "Unknown/unsupported 8B InstructionSet: " << instruction_set;
UNREACHABLE();
}
diff --git a/compiler/utils/jni_macro_assembler.h b/compiler/utils/jni_macro_assembler.h
index a8ca111..0fc1353 100644
--- a/compiler/utils/jni_macro_assembler.h
+++ b/compiler/utils/jni_macro_assembler.h
@@ -46,7 +46,7 @@
class JNIMacroAssembler : public DeletableArenaObject<kArenaAllocAssembler> {
public:
static std::unique_ptr<JNIMacroAssembler<kPointerSize>> Create(
- ArenaAllocator* arena,
+ ArenaAllocator* allocator,
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features = nullptr);
@@ -66,7 +66,13 @@
const ManagedRegisterEntrySpills& entry_spills) = 0;
// Emit code that will remove an activation from the stack
- virtual void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) = 0;
+ //
+ // Argument `may_suspend` must be `true` if the compiled method may be
+ // suspended during its execution; pass `false` only when suspension is
+ // impossible while the method executes.
+ virtual void RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ bool may_suspend) = 0;
virtual void IncreaseFrameSize(size_t adjust) = 0;
virtual void DecreaseFrameSize(size_t adjust) = 0;
@@ -269,7 +275,7 @@
}
protected:
- explicit JNIMacroAssemblerFwd(ArenaAllocator* arena) : asm_(arena) {}
+ explicit JNIMacroAssemblerFwd(ArenaAllocator* allocator) : asm_(allocator) {}
T asm_;
};
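The `may_suspend` contract added to RemoveFrame above is purely caller-supplied knowledge; a caller-side sketch (hypothetical epilogue emitter, not the actual JNI compiler code):

    void EmitEpilogue(JNIMacroAssembler<PointerSize::k64>* jni_asm,
                      size_t frame_size,
                      ArrayRef<const ManagedRegister> callee_saves,
                      bool can_reach_suspend_point) {
      // The flag simply forwards whether a suspend point can occur while
      // this method's frame is live.
      jni_asm->RemoveFrame(frame_size, callee_saves,
                           /* may_suspend */ can_reach_suspend_point);
    }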
diff --git a/compiler/utils/jni_macro_assembler_test.h b/compiler/utils/jni_macro_assembler_test.h
index 6129680..ba95e21 100644
--- a/compiler/utils/jni_macro_assembler_test.h
+++ b/compiler/utils/jni_macro_assembler_test.h
@@ -80,8 +80,8 @@
}
// Override this to set up any architecture-specific things, e.g., CPU revision.
- virtual Ass* CreateAssembler(ArenaAllocator* arena) {
- return new (arena) Ass(arena);
+ virtual Ass* CreateAssembler(ArenaAllocator* allocator) {
+ return new (allocator) Ass(allocator);
}
// Override this to set up any architecture-specific things, e.g., register vectors.
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index b300cc5..b83e3f5 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -5016,7 +5016,8 @@
}
void MipsAssembler::RemoveFrame(size_t frame_size,
- ArrayRef<const ManagedRegister> callee_save_regs) {
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ bool may_suspend ATTRIBUTE_UNUSED) {
CHECK_ALIGNED(frame_size, kStackAlignment);
DCHECK(!overwriting_);
cfi_.RememberState();
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 0b4eb9c..57b3edd 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -192,16 +192,16 @@
public:
using JNIBase = JNIMacroAssembler<PointerSize::k32>;
- explicit MipsAssembler(ArenaAllocator* arena,
+ explicit MipsAssembler(ArenaAllocator* allocator,
const MipsInstructionSetFeatures* instruction_set_features = nullptr)
- : Assembler(arena),
+ : Assembler(allocator),
overwriting_(false),
overwrite_location_(0),
reordering_(true),
ds_fsm_state_(kExpectingLabel),
ds_fsm_target_pc_(0),
- literals_(arena->Adapter(kArenaAllocAssembler)),
- jump_tables_(arena->Adapter(kArenaAllocAssembler)),
+ literals_(allocator->Adapter(kArenaAllocAssembler)),
+ jump_tables_(allocator->Adapter(kArenaAllocAssembler)),
last_position_adjustment_(0),
last_old_position_(0),
last_branch_id_(0),
@@ -1090,8 +1090,9 @@
const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
// Emit code that will remove an activation from the stack.
- void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
- OVERRIDE;
+ void RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ bool may_suspend) OVERRIDE;
void IncreaseFrameSize(size_t adjust) OVERRIDE;
void DecreaseFrameSize(size_t adjust) OVERRIDE;
diff --git a/compiler/utils/mips/assembler_mips32r5_test.cc b/compiler/utils/mips/assembler_mips32r5_test.cc
index a3662db..9a69ffd 100644
--- a/compiler/utils/mips/assembler_mips32r5_test.cc
+++ b/compiler/utils/mips/assembler_mips32r5_test.cc
@@ -72,8 +72,8 @@
return " -D -bbinary -mmips:isa32r5";
}
- mips::MipsAssembler* CreateAssembler(ArenaAllocator* arena) OVERRIDE {
- return new (arena) mips::MipsAssembler(arena, instruction_set_features_.get());
+ mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) OVERRIDE {
+ return new (allocator) mips::MipsAssembler(allocator, instruction_set_features_.get());
}
void SetUpHelpers() OVERRIDE {
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
index b6cb30a..b12b6b6 100644
--- a/compiler/utils/mips/assembler_mips32r6_test.cc
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -85,8 +85,8 @@
return " -D -bbinary -mmips:isa32r6";
}
- mips::MipsAssembler* CreateAssembler(ArenaAllocator* arena) OVERRIDE {
- return new (arena) mips::MipsAssembler(arena, instruction_set_features_.get());
+ mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) OVERRIDE {
+ return new (allocator) mips::MipsAssembler(allocator, instruction_set_features_.get());
}
void SetUpHelpers() OVERRIDE {
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 183b5e5..606d4c3 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -3406,7 +3406,8 @@
}
void Mips64Assembler::RemoveFrame(size_t frame_size,
- ArrayRef<const ManagedRegister> callee_save_regs) {
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ bool may_suspend ATTRIBUTE_UNUSED) {
CHECK_ALIGNED(frame_size, kStackAlignment);
DCHECK(!overwriting_);
cfi_.RememberState();
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index bb54382..a3787ac 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -418,14 +418,14 @@
public:
using JNIBase = JNIMacroAssembler<PointerSize::k64>;
- explicit Mips64Assembler(ArenaAllocator* arena,
+ explicit Mips64Assembler(ArenaAllocator* allocator,
const Mips64InstructionSetFeatures* instruction_set_features = nullptr)
- : Assembler(arena),
+ : Assembler(allocator),
overwriting_(false),
overwrite_location_(0),
- literals_(arena->Adapter(kArenaAllocAssembler)),
- long_literals_(arena->Adapter(kArenaAllocAssembler)),
- jump_tables_(arena->Adapter(kArenaAllocAssembler)),
+ literals_(allocator->Adapter(kArenaAllocAssembler)),
+ long_literals_(allocator->Adapter(kArenaAllocAssembler)),
+ jump_tables_(allocator->Adapter(kArenaAllocAssembler)),
last_position_adjustment_(0),
last_old_position_(0),
last_branch_id_(0),
@@ -1278,7 +1278,9 @@
const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
// Emit code that will remove an activation from the stack.
- void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) OVERRIDE;
+ void RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ bool may_suspend) OVERRIDE;
void IncreaseFrameSize(size_t adjust) OVERRIDE;
void DecreaseFrameSize(size_t adjust) OVERRIDE;
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index 16a36f9..bf0326d 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -83,8 +83,8 @@
return " -D -bbinary -mmips:isa64r6";
}
- mips64::Mips64Assembler* CreateAssembler(ArenaAllocator* arena) OVERRIDE {
- return new (arena) mips64::Mips64Assembler(arena, instruction_set_features_.get());
+ mips64::Mips64Assembler* CreateAssembler(ArenaAllocator* allocator) OVERRIDE {
+ return new (allocator) mips64::Mips64Assembler(allocator, instruction_set_features_.get());
}
void SetUpHelpers() OVERRIDE {
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index dce3ad2..f3b516c 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -266,7 +266,8 @@
*/
class ConstantArea {
public:
- explicit ConstantArea(ArenaAllocator* arena) : buffer_(arena->Adapter(kArenaAllocAssembler)) {}
+ explicit ConstantArea(ArenaAllocator* allocator)
+ : buffer_(allocator->Adapter(kArenaAllocAssembler)) {}
// Add a double to the constant area, returning the offset into
// the constant area where the literal resides.
@@ -307,7 +308,8 @@
class X86Assembler FINAL : public Assembler {
public:
- explicit X86Assembler(ArenaAllocator* arena) : Assembler(arena), constant_area_(arena) {}
+ explicit X86Assembler(ArenaAllocator* allocator)
+ : Assembler(allocator), constant_area_(allocator) {}
virtual ~X86Assembler() {}
/*
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.cc b/compiler/utils/x86/jni_macro_assembler_x86.cc
index e074346..7e29c4a 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.cc
+++ b/compiler/utils/x86/jni_macro_assembler_x86.cc
@@ -85,7 +85,8 @@
}
void X86JNIMacroAssembler::RemoveFrame(size_t frame_size,
- ArrayRef<const ManagedRegister> spill_regs) {
+ ArrayRef<const ManagedRegister> spill_regs,
+ bool may_suspend ATTRIBUTE_UNUSED) {
CHECK_ALIGNED(frame_size, kStackAlignment);
cfi().RememberState();
// -kFramePointerSize for ArtMethod*.
@@ -517,7 +518,7 @@
}
void X86JNIMacroAssembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
- X86ExceptionSlowPath* slow = new (__ GetArena()) X86ExceptionSlowPath(stack_adjust);
+ X86ExceptionSlowPath* slow = new (__ GetAllocator()) X86ExceptionSlowPath(stack_adjust);
__ GetBuffer()->EnqueueSlowPath(slow);
__ fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()), Immediate(0));
__ j(kNotEqual, slow->Entry());
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h
index 8ffda64..56eaf19 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.h
+++ b/compiler/utils/x86/jni_macro_assembler_x86.h
@@ -34,7 +34,7 @@
class X86JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86Assembler, PointerSize::k32> {
public:
- explicit X86JNIMacroAssembler(ArenaAllocator* arena) : JNIMacroAssemblerFwd(arena) {}
+ explicit X86JNIMacroAssembler(ArenaAllocator* allocator) : JNIMacroAssemblerFwd(allocator) {}
virtual ~X86JNIMacroAssembler() {}
//
@@ -48,8 +48,9 @@
const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
// Emit code that will remove an activation from the stack
- void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
- OVERRIDE;
+ void RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ bool may_suspend) OVERRIDE;
void IncreaseFrameSize(size_t adjust) OVERRIDE;
void DecreaseFrameSize(size_t adjust) OVERRIDE;
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 1130444..0d24a75 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -290,7 +290,8 @@
*/
class ConstantArea {
public:
- explicit ConstantArea(ArenaAllocator* arena) : buffer_(arena->Adapter(kArenaAllocAssembler)) {}
+ explicit ConstantArea(ArenaAllocator* allocator)
+ : buffer_(allocator->Adapter(kArenaAllocAssembler)) {}
// Add a double to the constant area, returning the offset into
// the constant area where the literal resides.
@@ -352,7 +353,8 @@
class X86_64Assembler FINAL : public Assembler {
public:
- explicit X86_64Assembler(ArenaAllocator* arena) : Assembler(arena), constant_area_(arena) {}
+ explicit X86_64Assembler(ArenaAllocator* allocator)
+ : Assembler(allocator), constant_area_(allocator) {}
virtual ~X86_64Assembler() {}
/*
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index aff8871..b08ba4a 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -2043,7 +2043,7 @@
ArrayRef<const ManagedRegister> spill_regs(raw_spill_regs);
size_t frame_size = 10 * kStackAlignment;
- assembler->RemoveFrame(frame_size, spill_regs);
+ assembler->RemoveFrame(frame_size, spill_regs, /* may_suspend */ true);
// Construct assembly text counterpart.
std::ostringstream str;
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
index ec86254..5766f9d 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
@@ -100,7 +100,8 @@
}
void X86_64JNIMacroAssembler::RemoveFrame(size_t frame_size,
- ArrayRef<const ManagedRegister> spill_regs) {
+ ArrayRef<const ManagedRegister> spill_regs,
+ bool may_suspend ATTRIBUTE_UNUSED) {
CHECK_ALIGNED(frame_size, kStackAlignment);
cfi().RememberState();
int gpr_count = 0;
@@ -583,9 +584,10 @@
};
void X86_64JNIMacroAssembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
- X86_64ExceptionSlowPath* slow = new (__ GetArena()) X86_64ExceptionSlowPath(stack_adjust);
+ X86_64ExceptionSlowPath* slow = new (__ GetAllocator()) X86_64ExceptionSlowPath(stack_adjust);
__ GetBuffer()->EnqueueSlowPath(slow);
- __ gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true), Immediate(0));
+ __ gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true),
+ Immediate(0));
__ j(kNotEqual, slow->Entry());
}
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
index aa058f7..d1a3032 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
@@ -34,8 +34,8 @@
class X86_64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86_64Assembler,
PointerSize::k64> {
public:
- explicit X86_64JNIMacroAssembler(ArenaAllocator* arena)
- : JNIMacroAssemblerFwd<X86_64Assembler, PointerSize::k64>(arena) {}
+ explicit X86_64JNIMacroAssembler(ArenaAllocator* allocator)
+ : JNIMacroAssemblerFwd<X86_64Assembler, PointerSize::k64>(allocator) {}
virtual ~X86_64JNIMacroAssembler() {}
//
@@ -49,8 +49,9 @@
const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
// Emit code that will remove an activation from the stack
- void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
- OVERRIDE;
+ void RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ bool may_suspend) OVERRIDE;
void IncreaseFrameSize(size_t adjust) OVERRIDE;
void DecreaseFrameSize(size_t adjust) OVERRIDE;
diff --git a/dex2oat/Android.bp b/dex2oat/Android.bp
index c9125df..a93b0e7 100644
--- a/dex2oat/Android.bp
+++ b/dex2oat/Android.bp
@@ -91,7 +91,10 @@
name: "dex2oat-defaults",
host_supported: true,
defaults: ["art_defaults"],
- srcs: ["dex2oat.cc"],
+ srcs: [
+ "dex2oat_options.cc",
+ "dex2oat.cc",
+ ],
target: {
android: {
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 7b46531..528cf3a 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -20,11 +20,13 @@
#include <sys/stat.h>
#include "base/memory_tool.h"
+#include <forward_list>
#include <fstream>
#include <iostream>
#include <limits>
#include <sstream>
#include <string>
+#include <type_traits>
#include <unordered_set>
#include <vector>
@@ -50,16 +52,19 @@
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "class_loader_context.h"
+#include "cmdline_parser.h"
#include "compiler.h"
#include "compiler_callbacks.h"
#include "debug/elf_debug_writer.h"
#include "debug/method_debug_info.h"
#include "dex/quick_compiler_callbacks.h"
#include "dex/verification_results.h"
+#include "dex2oat_options.h"
#include "dex2oat_return_codes.h"
#include "dex_file-inl.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
+#include "driver/compiler_options_map-inl.h"
#include "elf_file.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
@@ -235,6 +240,13 @@
UsageError(" --oat-fd=<number>: specifies the oat output destination via a file descriptor.");
UsageError(" Example: --oat-fd=6");
UsageError("");
+ UsageError(" --input-vdex-fd=<number>: specifies the vdex input source via a file descriptor.");
+ UsageError(" Example: --input-vdex-fd=6");
+ UsageError("");
+ UsageError(" --output-vdex-fd=<number>: specifies the vdex output destination via a file");
+ UsageError(" descriptor.");
+ UsageError(" Example: --output-vdex-fd=6");
+ UsageError("");
UsageError(" --oat-location=<oat-name>: specifies a symbolic name for the file corresponding");
UsageError(" to the file descriptor specified by --oat-fd.");
UsageError(" Example: --oat-location=/data/dalvik-cache/system@app@Calculator.apk.oat");
@@ -659,76 +671,27 @@
std::string error_msg;
};
- void ParseZipFd(const StringPiece& option) {
- ParseUintOption(option, "--zip-fd", &zip_fd_, Usage);
- }
-
- void ParseInputVdexFd(const StringPiece& option) {
- // Note that the input vdex fd might be -1.
- ParseIntOption(option, "--input-vdex-fd", &input_vdex_fd_, Usage);
- }
-
- void ParseOutputVdexFd(const StringPiece& option) {
- ParseUintOption(option, "--output-vdex-fd", &output_vdex_fd_, Usage);
- }
-
- void ParseOatFd(const StringPiece& option) {
- ParseUintOption(option, "--oat-fd", &oat_fd_, Usage);
- }
-
- void ParseFdForCollection(const StringPiece& option,
- const char* arg_name,
- std::vector<uint32_t>* fds) {
- uint32_t fd;
- ParseUintOption(option, arg_name, &fd, Usage);
- fds->push_back(fd);
- }
-
- void ParseJ(const StringPiece& option) {
- ParseUintOption(option, "-j", &thread_count_, Usage, /* is_long_option */ false);
- }
-
- void ParseBase(const StringPiece& option) {
- DCHECK(option.starts_with("--base="));
- const char* image_base_str = option.substr(strlen("--base=")).data();
+ void ParseBase(const std::string& option) {
char* end;
- image_base_ = strtoul(image_base_str, &end, 16);
- if (end == image_base_str || *end != '\0') {
+ image_base_ = strtoul(option.c_str(), &end, 16);
+ if (end == option.c_str() || *end != '\0') {
Usage("Failed to parse hexadecimal value for option %s", option.data());
}
}
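ParseBase above accepts any string strtoul can fully consume in base 16 but does not flag overflow; a stricter sketch, assuming we also want ERANGE rejection in the style of the decimal ParseNumeric helper elsewhere in this change:

    #include <cerrno>
    #include <cstdlib>
    #include <string>

    bool ParseHexBase(const std::string& s, unsigned long* out) {  // NOLINT [runtime/int]
      char* end;
      errno = 0;
      unsigned long value = strtoul(s.c_str(), &end, 16);  // NOLINT [runtime/int]
      if (end == s.c_str() || *end != '\0' || errno == ERANGE) {
        return false;  // empty input, trailing junk, or out of range
      }
      *out = value;
      return true;
    }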
- void ParseInstructionSet(const StringPiece& option) {
- DCHECK(option.starts_with("--instruction-set="));
- StringPiece instruction_set_str = option.substr(strlen("--instruction-set=")).data();
- // StringPiece is not necessarily zero-terminated, so need to make a copy and ensure it.
- std::unique_ptr<char[]> buf(new char[instruction_set_str.length() + 1]);
- strncpy(buf.get(), instruction_set_str.data(), instruction_set_str.length());
- buf.get()[instruction_set_str.length()] = 0;
- instruction_set_ = GetInstructionSetFromString(buf.get());
- // arm actually means thumb2.
- if (instruction_set_ == InstructionSet::kArm) {
- instruction_set_ = InstructionSet::kThumb2;
- }
- }
-
bool VerifyProfileData() {
return profile_compilation_info_->VerifyProfileData(dex_files_);
}
- void ParseInstructionSetVariant(const StringPiece& option, ParserOptions* parser_options) {
- DCHECK(option.starts_with("--instruction-set-variant="));
- StringPiece str = option.substr(strlen("--instruction-set-variant=")).data();
+ void ParseInstructionSetVariant(const std::string& option, ParserOptions* parser_options) {
instruction_set_features_ = InstructionSetFeatures::FromVariant(
- instruction_set_, str.as_string(), &parser_options->error_msg);
+ instruction_set_, option, &parser_options->error_msg);
if (instruction_set_features_.get() == nullptr) {
Usage("%s", parser_options->error_msg.c_str());
}
}
- void ParseInstructionSetFeatures(const StringPiece& option, ParserOptions* parser_options) {
- DCHECK(option.starts_with("--instruction-set-features="));
- StringPiece str = option.substr(strlen("--instruction-set-features=")).data();
+ void ParseInstructionSetFeatures(const std::string& option, ParserOptions* parser_options) {
if (instruction_set_features_ == nullptr) {
instruction_set_features_ = InstructionSetFeatures::FromVariant(
instruction_set_, "default", &parser_options->error_msg);
@@ -738,38 +701,9 @@
}
}
instruction_set_features_ =
- instruction_set_features_->AddFeaturesFromString(str.as_string(),
- &parser_options->error_msg);
+ instruction_set_features_->AddFeaturesFromString(option, &parser_options->error_msg);
if (instruction_set_features_ == nullptr) {
- Usage("Error parsing '%s': %s", option.data(), parser_options->error_msg.c_str());
- }
- }
-
- void ParseCompilerBackend(const StringPiece& option, ParserOptions* parser_options) {
- DCHECK(option.starts_with("--compiler-backend="));
- parser_options->requested_specific_compiler = true;
- StringPiece backend_str = option.substr(strlen("--compiler-backend=")).data();
- if (backend_str == "Quick") {
- compiler_kind_ = Compiler::kQuick;
- } else if (backend_str == "Optimizing") {
- compiler_kind_ = Compiler::kOptimizing;
- } else {
- Usage("Unknown compiler backend: %s", backend_str.data());
- }
- }
-
- void ParseImageFormat(const StringPiece& option) {
- const StringPiece substr("--image-format=");
- DCHECK(option.starts_with(substr));
- const StringPiece format_str = option.substr(substr.length());
- if (format_str == "lz4") {
- image_storage_mode_ = ImageHeader::kStorageModeLZ4;
- } else if (format_str == "lz4hc") {
- image_storage_mode_ = ImageHeader::kStorageModeLZ4HC;
- } else if (format_str == "uncompressed") {
- image_storage_mode_ = ImageHeader::kStorageModeUncompressed;
- } else {
- Usage("Unknown image format: %s", format_str.data());
+ Usage("Error parsing '%s': %s", option.c_str(), parser_options->error_msg.c_str());
}
}
@@ -1092,23 +1026,20 @@
base_symbol_oat = base_symbol_oat.substr(0, last_symbol_oat_slash + 1);
}
- const size_t num_expanded_files = 2 + (base_symbol_oat.empty() ? 0 : 1);
- char_backing_storage_.reserve((dex_locations_.size() - 1) * num_expanded_files);
-
// Now create the other names. Use a counted loop to skip the first one.
for (size_t i = 1; i < dex_locations_.size(); ++i) {
// TODO: Make everything properly std::string.
std::string image_name = CreateMultiImageName(dex_locations_[i], prefix, infix, ".art");
- char_backing_storage_.push_back(base_img + image_name);
- image_filenames_.push_back((char_backing_storage_.end() - 1)->c_str());
+ char_backing_storage_.push_front(base_img + image_name);
+ image_filenames_.push_back(char_backing_storage_.front().c_str());
std::string oat_name = CreateMultiImageName(dex_locations_[i], prefix, infix, ".oat");
- char_backing_storage_.push_back(base_oat + oat_name);
- oat_filenames_.push_back((char_backing_storage_.end() - 1)->c_str());
+ char_backing_storage_.push_front(base_oat + oat_name);
+ oat_filenames_.push_back(char_backing_storage_.front().c_str());
if (!base_symbol_oat.empty()) {
- char_backing_storage_.push_back(base_symbol_oat + oat_name);
- oat_unstripped_.push_back((char_backing_storage_.end() - 1)->c_str());
+ char_backing_storage_.push_front(base_symbol_oat + oat_name);
+ oat_unstripped_.push_back(char_backing_storage_.front().c_str());
}
}
}
@@ -1173,6 +1104,43 @@
kUseReadBarrier ? OatHeader::kTrueValue : OatHeader::kFalseValue);
}
+ // This simple forward is here so the string specializations below don't look out of place.
+ template <typename T, typename U>
+ void AssignIfExists(Dex2oatArgumentMap& map,
+ const Dex2oatArgumentMap::Key<T>& key,
+ U* out) {
+ map.AssignIfExists(key, out);
+ }
+
+ // Specializations to handle const char* vs std::string.
+ void AssignIfExists(Dex2oatArgumentMap& map,
+ const Dex2oatArgumentMap::Key<std::string>& key,
+ const char** out) {
+ if (map.Exists(key)) {
+ char_backing_storage_.push_front(std::move(*map.Get(key)));
+ *out = char_backing_storage_.front().c_str();
+ }
+ }
+ void AssignIfExists(Dex2oatArgumentMap& map,
+ const Dex2oatArgumentMap::Key<std::vector<std::string>>& key,
+ std::vector<const char*>* out) {
+ if (map.Exists(key)) {
+ for (auto& val : *map.Get(key)) {
+ char_backing_storage_.push_front(std::move(val));
+ out->push_back(char_backing_storage_.front().c_str());
+ }
+ }
+ }
+
+ template <typename T>
+ void AssignTrueIfExists(Dex2oatArgumentMap& map,
+ const Dex2oatArgumentMap::Key<T>& key,
+ bool* out) {
+ if (map.Exists(key)) {
+ *out = true;
+ }
+ }
+
// Parse the arguments from the command line. In case of an unrecognized option or impossible
// values/combinations, a usage error will be displayed and exit() is called. Thus, if the method
// returns, arguments have been successfully parsed.
@@ -1182,159 +1150,104 @@
InitLogging(argv, Runtime::Abort);
- // Skip over argv[0].
- argv++;
- argc--;
-
- if (argc == 0) {
- Usage("No arguments specified");
- }
-
- std::unique_ptr<ParserOptions> parser_options(new ParserOptions());
compiler_options_.reset(new CompilerOptions());
- for (int i = 0; i < argc; i++) {
- const StringPiece option(argv[i]);
- const bool log_options = false;
- if (log_options) {
- LOG(INFO) << "dex2oat: option[" << i << "]=" << argv[i];
+ using M = Dex2oatArgumentMap;
+ std::string error_msg;
+ std::unique_ptr<M> args_uptr = M::Parse(argc, const_cast<const char**>(argv), &error_msg);
+ if (args_uptr == nullptr) {
+ Usage("Failed to parse command line: %s", error_msg.c_str());
+ UNREACHABLE();
+ }
+
+ M& args = *args_uptr;
+
+ std::unique_ptr<ParserOptions> parser_options(new ParserOptions());
+
+ AssignIfExists(args, M::DexFiles, &dex_filenames_);
+ AssignIfExists(args, M::DexLocations, &dex_locations_);
+ AssignIfExists(args, M::OatFiles, &oat_filenames_);
+ AssignIfExists(args, M::OatSymbols, &parser_options->oat_symbols);
+ AssignIfExists(args, M::ImageFilenames, &image_filenames_);
+ AssignIfExists(args, M::ZipFd, &zip_fd_);
+ AssignIfExists(args, M::ZipLocation, &zip_location_);
+ AssignIfExists(args, M::InputVdexFd, &input_vdex_fd_);
+ AssignIfExists(args, M::OutputVdexFd, &output_vdex_fd_);
+ AssignIfExists(args, M::InputVdex, &input_vdex_);
+ AssignIfExists(args, M::OutputVdex, &output_vdex_);
+ AssignIfExists(args, M::OatFd, &oat_fd_);
+ AssignIfExists(args, M::OatLocation, &oat_location_);
+ AssignIfExists(args, M::Watchdog, &parser_options->watch_dog_enabled);
+ AssignIfExists(args, M::WatchdogTimeout, &parser_options->watch_dog_timeout_in_ms);
+ AssignIfExists(args, M::Threads, &thread_count_);
+ AssignIfExists(args, M::ImageClasses, &image_classes_filename_);
+ AssignIfExists(args, M::ImageClassesZip, &image_classes_zip_filename_);
+ AssignIfExists(args, M::CompiledClasses, &compiled_classes_filename_);
+ AssignIfExists(args, M::CompiledClassesZip, &compiled_classes_zip_filename_);
+ AssignIfExists(args, M::CompiledMethods, &compiled_methods_filename_);
+ AssignIfExists(args, M::CompiledMethodsZip, &compiled_methods_zip_filename_);
+ AssignIfExists(args, M::Passes, &passes_to_run_filename_);
+ AssignIfExists(args, M::BootImage, &parser_options->boot_image_filename);
+ AssignIfExists(args, M::AndroidRoot, &android_root_);
+ AssignIfExists(args, M::Profile, &profile_file_);
+ AssignIfExists(args, M::ProfileFd, &profile_file_fd_);
+ AssignIfExists(args, M::RuntimeOptions, &runtime_args_);
+ AssignIfExists(args, M::SwapFile, &swap_file_name_);
+ AssignIfExists(args, M::SwapFileFd, &swap_fd_);
+ AssignIfExists(args, M::SwapDexSizeThreshold, &min_dex_file_cumulative_size_for_swap_);
+ AssignIfExists(args, M::SwapDexCountThreshold, &min_dex_files_for_swap_);
+ AssignIfExists(args, M::VeryLargeAppThreshold, &very_large_threshold_);
+ AssignIfExists(args, M::AppImageFile, &app_image_file_name_);
+ AssignIfExists(args, M::AppImageFileFd, &app_image_fd_);
+ AssignIfExists(args, M::NoInlineFrom, &no_inline_from_string_);
+ AssignIfExists(args, M::ClasspathDir, &classpath_dir_);
+ AssignIfExists(args, M::DirtyImageObjects, &dirty_image_objects_filename_);
+ AssignIfExists(args, M::ImageFormat, &image_storage_mode_);
+
+ AssignIfExists(args, M::Backend, &compiler_kind_);
+ parser_options->requested_specific_compiler = args.Exists(M::Backend);
+
+ AssignIfExists(args, M::TargetInstructionSet, &instruction_set_);
+ // arm actually means thumb2.
+ if (instruction_set_ == InstructionSet::kArm) {
+ instruction_set_ = InstructionSet::kThumb2;
+ }
+
+ AssignTrueIfExists(args, M::Host, &is_host_);
+ AssignTrueIfExists(args, M::DumpTiming, &dump_timing_);
+ AssignTrueIfExists(args, M::DumpPasses, &dump_passes_);
+ AssignTrueIfExists(args, M::DumpStats, &dump_stats_);
+ AssignTrueIfExists(args, M::AvoidStoringInvocation, &avoid_storing_invocation_);
+ AssignTrueIfExists(args, M::MultiImage, &multi_image_);
+
+ if (args.Exists(M::ForceDeterminism)) {
+ if (!SupportsDeterministicCompilation()) {
+ Usage("Option --force-determinism requires read barriers or a CMS/MS garbage collector");
}
- if (option.starts_with("--dex-file=")) {
- dex_filenames_.push_back(option.substr(strlen("--dex-file=")).data());
- } else if (option.starts_with("--dex-location=")) {
- dex_locations_.push_back(option.substr(strlen("--dex-location=")).data());
- } else if (option.starts_with("--zip-fd=")) {
- ParseZipFd(option);
- } else if (option.starts_with("--zip-location=")) {
- zip_location_ = option.substr(strlen("--zip-location=")).data();
- } else if (option.starts_with("--input-vdex-fd=")) {
- ParseInputVdexFd(option);
- } else if (option.starts_with("--input-vdex=")) {
- input_vdex_ = option.substr(strlen("--input-vdex=")).data();
- } else if (option.starts_with("--output-vdex=")) {
- output_vdex_ = option.substr(strlen("--output-vdex=")).data();
- } else if (option.starts_with("--output-vdex-fd=")) {
- ParseOutputVdexFd(option);
- } else if (option.starts_with("--oat-file=")) {
- oat_filenames_.push_back(option.substr(strlen("--oat-file=")).data());
- } else if (option.starts_with("--oat-symbols=")) {
- parser_options->oat_symbols.push_back(option.substr(strlen("--oat-symbols=")).data());
- } else if (option.starts_with("--oat-fd=")) {
- ParseOatFd(option);
- } else if (option.starts_with("--oat-location=")) {
- oat_location_ = option.substr(strlen("--oat-location=")).data();
- } else if (option == "--watch-dog") {
- parser_options->watch_dog_enabled = true;
- } else if (option == "--no-watch-dog") {
- parser_options->watch_dog_enabled = false;
- } else if (option.starts_with("--watchdog-timeout=")) {
- ParseIntOption(option,
- "--watchdog-timeout",
- &parser_options->watch_dog_timeout_in_ms,
- Usage);
- } else if (option.starts_with("-j")) {
- ParseJ(option);
- } else if (option.starts_with("--image=")) {
- image_filenames_.push_back(option.substr(strlen("--image=")).data());
- } else if (option.starts_with("--image-classes=")) {
- image_classes_filename_ = option.substr(strlen("--image-classes=")).data();
- } else if (option.starts_with("--image-classes-zip=")) {
- image_classes_zip_filename_ = option.substr(strlen("--image-classes-zip=")).data();
- } else if (option.starts_with("--image-format=")) {
- ParseImageFormat(option);
- } else if (option.starts_with("--compiled-classes=")) {
- compiled_classes_filename_ = option.substr(strlen("--compiled-classes=")).data();
- } else if (option.starts_with("--compiled-classes-zip=")) {
- compiled_classes_zip_filename_ = option.substr(strlen("--compiled-classes-zip=")).data();
- } else if (option.starts_with("--compiled-methods=")) {
- compiled_methods_filename_ = option.substr(strlen("--compiled-methods=")).data();
- } else if (option.starts_with("--compiled-methods-zip=")) {
- compiled_methods_zip_filename_ = option.substr(strlen("--compiled-methods-zip=")).data();
- } else if (option.starts_with("--run-passes=")) {
- passes_to_run_filename_ = option.substr(strlen("--run-passes=")).data();
- } else if (option.starts_with("--base=")) {
- ParseBase(option);
- } else if (option.starts_with("--boot-image=")) {
- parser_options->boot_image_filename = option.substr(strlen("--boot-image=")).data();
- } else if (option.starts_with("--android-root=")) {
- android_root_ = option.substr(strlen("--android-root=")).data();
- } else if (option.starts_with("--instruction-set=")) {
- ParseInstructionSet(option);
- } else if (option.starts_with("--instruction-set-variant=")) {
- ParseInstructionSetVariant(option, parser_options.get());
- } else if (option.starts_with("--instruction-set-features=")) {
- ParseInstructionSetFeatures(option, parser_options.get());
- } else if (option.starts_with("--compiler-backend=")) {
- ParseCompilerBackend(option, parser_options.get());
- } else if (option.starts_with("--profile-file=")) {
- profile_file_ = option.substr(strlen("--profile-file=")).ToString();
- } else if (option.starts_with("--profile-file-fd=")) {
- ParseUintOption(option, "--profile-file-fd", &profile_file_fd_, Usage);
- } else if (option == "--host") {
- is_host_ = true;
- } else if (option == "--runtime-arg") {
- if (++i >= argc) {
- Usage("Missing required argument for --runtime-arg");
- }
- if (log_options) {
- LOG(INFO) << "dex2oat: option[" << i << "]=" << argv[i];
- }
- runtime_args_.push_back(argv[i]);
- } else if (option == "--dump-timing") {
- dump_timing_ = true;
- } else if (option == "--dump-passes") {
- dump_passes_ = true;
- } else if (option == "--dump-stats") {
- dump_stats_ = true;
- } else if (option == "--avoid-storing-invocation") {
- avoid_storing_invocation_ = true;
- } else if (option.starts_with("--swap-file=")) {
- swap_file_name_ = option.substr(strlen("--swap-file=")).data();
- } else if (option.starts_with("--swap-fd=")) {
- ParseUintOption(option, "--swap-fd", &swap_fd_, Usage);
- } else if (option.starts_with("--swap-dex-size-threshold=")) {
- ParseUintOption(option,
- "--swap-dex-size-threshold",
- &min_dex_file_cumulative_size_for_swap_,
- Usage);
- } else if (option.starts_with("--swap-dex-count-threshold=")) {
- ParseUintOption(option,
- "--swap-dex-count-threshold",
- &min_dex_files_for_swap_,
- Usage);
- } else if (option.starts_with("--very-large-app-threshold=")) {
- ParseUintOption(option,
- "--very-large-app-threshold",
- &very_large_threshold_,
- Usage);
- } else if (option.starts_with("--app-image-file=")) {
- app_image_file_name_ = option.substr(strlen("--app-image-file=")).data();
- } else if (option.starts_with("--app-image-fd=")) {
- ParseUintOption(option, "--app-image-fd", &app_image_fd_, Usage);
- } else if (option == "--multi-image") {
- multi_image_ = true;
- } else if (option.starts_with("--no-inline-from=")) {
- no_inline_from_string_ = option.substr(strlen("--no-inline-from=")).data();
- } else if (option == "--force-determinism") {
- if (!SupportsDeterministicCompilation()) {
- Usage("Option --force-determinism requires read barriers or a CMS/MS garbage collector");
- }
- force_determinism_ = true;
- } else if (option.starts_with("--classpath-dir=")) {
- classpath_dir_ = option.substr(strlen("--classpath-dir=")).data();
- } else if (option.starts_with("--class-loader-context=")) {
- class_loader_context_ = ClassLoaderContext::Create(
- option.substr(strlen("--class-loader-context=")).data());
- if (class_loader_context_ == nullptr) {
- Usage("Option --class-loader-context has an incorrect format: %s", option.data());
- }
- } else if (option.starts_with("--dirty-image-objects=")) {
- dirty_image_objects_filename_ = option.substr(strlen("--dirty-image-objects=")).data();
- } else if (!compiler_options_->ParseCompilerOption(option, Usage)) {
- Usage("Unknown argument %s", option.data());
+ force_determinism_ = true;
+ }
+
+ if (args.Exists(M::Base)) {
+ ParseBase(*args.Get(M::Base));
+ }
+ if (args.Exists(M::TargetInstructionSetVariant)) {
+ ParseInstructionSetVariant(*args.Get(M::TargetInstructionSetVariant), parser_options.get());
+ }
+ if (args.Exists(M::TargetInstructionSetFeatures)) {
+ ParseInstructionSetFeatures(*args.Get(M::TargetInstructionSetFeatures), parser_options.get());
+ }
+ if (args.Exists(M::ClassLoaderContext)) {
+ class_loader_context_ = ClassLoaderContext::Create(*args.Get(M::ClassLoaderContext));
+ if (class_loader_context_ == nullptr) {
+ Usage("Option --class-loader-context has an incorrect format: %s",
+ args.Get(M::ClassLoaderContext)->c_str());
}
}
+ if (!ReadCompilerOptions(args, compiler_options_.get(), &error_msg)) {
+ Usage(error_msg.c_str());
+ }
+
ProcessOptions(parser_options.get());
// Insert some compiler things.
@@ -2931,7 +2844,7 @@
std::unordered_map<const DexFile*, size_t> dex_file_oat_index_map_;
// Backing storage.
- std::vector<std::string> char_backing_storage_;
+ std::forward_list<std::string> char_backing_storage_;
// See CompilerOptions.force_determinism_.
bool force_determinism_;
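The switch from std::vector to std::forward_list for char_backing_storage_ (paired with the push_front usage in the AssignIfExists specializations earlier) is about pointer stability: forward_list never relocates its elements, so `const char*` views taken from stored strings stay valid as more strings are added, whereas vector growth may reallocate and dangle them. A minimal self-contained sketch:

    #include <forward_list>
    #include <string>
    #include <vector>

    std::forward_list<std::string> storage;   // stable element addresses
    std::vector<const char*> views;           // raw views into `storage`

    void Keep(std::string s) {
      storage.push_front(std::move(s));
      // Safe: forward_list::push_front never invalidates existing elements,
      // so earlier entries in `views` remain valid.
      views.push_back(storage.front().c_str());
    }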
diff --git a/dex2oat/dex2oat_image_test.cc b/dex2oat/dex2oat_image_test.cc
index 1f644c1..f20e934 100644
--- a/dex2oat/dex2oat_image_test.cc
+++ b/dex2oat/dex2oat_image_test.cc
@@ -328,8 +328,8 @@
profile_file.Close();
std::cout << "Profile sizes " << profile_sizes << std::endl;
// Since there is some difference between profile vs image + methods due to layout, check that
- // the range is within expected margins (+-5%).
- const double kRatio = 0.95;
+ // the range is within expected margins (+-10%).
+ const double kRatio = 0.90;
EXPECT_LE(profile_sizes.art_size * kRatio, compiled_methods_sizes.art_size);
// TODO(mathieuc): Find a reliable way to check compiled code. b/63746626
// EXPECT_LE(profile_sizes.oat_size * kRatio, compiled_methods_sizes.oat_size);
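The widened margin in concrete, purely illustrative numbers; note the assertion only bounds the size from below:

    const double kRatio = 0.90;
    const size_t profile_art_size = 1000;   // KiB, hypothetical
    const size_t compiled_art_size = 930;   // KiB, hypothetical
    // Passes: 1000 * 0.90 = 900 <= 930, i.e. within the 10% margin.
    EXPECT_LE(profile_art_size * kRatio, compiled_art_size);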
diff --git a/dex2oat/dex2oat_options.cc b/dex2oat/dex2oat_options.cc
new file mode 100644
index 0000000..43e6c4d
--- /dev/null
+++ b/dex2oat/dex2oat_options.cc
@@ -0,0 +1,268 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex2oat_options.h"
+
+#include <memory>
+
+#include "cmdline_parser.h"
+#include "driver/compiler_options_map-inl.h"
+
+namespace art {
+
+template<>
+struct CmdlineType<InstructionSet> : CmdlineTypeParser<InstructionSet> {
+ Result Parse(const std::string& option) {
+ InstructionSet set = GetInstructionSetFromString(option.c_str());
+ if (set == kNone) {
+ return Result::Failure(std::string("Not a valid instruction set: '") + option + "'");
+ }
+ return Result::Success(set);
+ }
+
+ static const char* Name() { return "InstructionSet"; }
+};
+
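The effect of this specialization, in a sketch (direct calls shown only for illustration; in practice the command-line parser invokes Parse internally):

    CmdlineType<InstructionSet> is_parser;
    auto ok  = is_parser.Parse("arm64");  // Success(kArm64)
    auto bad = is_parser.Parse("z80");    // Failure("Not a valid instruction set: 'z80'")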
+#define COMPILER_OPTIONS_MAP_TYPE Dex2oatArgumentMap
+#define COMPILER_OPTIONS_MAP_KEY_TYPE Dex2oatArgumentMapKey
+#include "driver/compiler_options_map-storage.h"
+
+// Specify storage for the Dex2oatOptions keys.
+
+#define DEX2OAT_OPTIONS_KEY(Type, Name, ...) \
+ const Dex2oatArgumentMap::Key<Type> Dex2oatArgumentMap::Name {__VA_ARGS__}; // NOLINT [readability/braces] [4]
+#include "dex2oat_options.def"
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wframe-larger-than="
+
+using M = Dex2oatArgumentMap;
+using Parser = CmdlineParser<Dex2oatArgumentMap, Dex2oatArgumentMap::Key>;
+using Builder = Parser::Builder;
+
+static void AddInputMappings(Builder& builder) {
+ builder.
+ Define("--dex-file=_")
+ .WithType<std::vector<std::string>>().AppendValues()
+ .IntoKey(M::DexFiles)
+ .Define("--dex-location=_")
+ .WithType<std::vector<std::string>>().AppendValues()
+ .IntoKey(M::DexLocations)
+ .Define("--zip-fd=_")
+ .WithType<int>()
+ .IntoKey(M::ZipFd)
+ .Define("--zip-location=_")
+ .WithType<std::string>()
+ .IntoKey(M::ZipLocation)
+ .Define("--boot-image=_")
+ .WithType<std::string>()
+ .IntoKey(M::BootImage);
+}
+
+static void AddGeneratedArtifactMappings(Builder& builder) {
+ builder.
+ Define("--input-vdex-fd=_")
+ .WithType<int>()
+ .IntoKey(M::InputVdexFd)
+ .Define("--input-vdex=_")
+ .WithType<std::string>()
+ .IntoKey(M::InputVdex)
+ .Define("--output-vdex-fd=_")
+ .WithType<int>()
+ .IntoKey(M::OutputVdexFd)
+ .Define("--output-vdex=_")
+ .WithType<std::string>()
+ .IntoKey(M::OutputVdex)
+ .Define("--oat-file=_")
+ .WithType<std::vector<std::string>>().AppendValues()
+ .IntoKey(M::OatFiles)
+ .Define("--oat-symbols=_")
+ .WithType<std::vector<std::string>>().AppendValues()
+ .IntoKey(M::OatSymbols)
+ .Define("--oat-fd=_")
+ .WithType<int>()
+ .IntoKey(M::OatFd)
+ .Define("--oat-location=_")
+ .WithType<std::string>()
+ .IntoKey(M::OatLocation);
+}
+
+static void AddImageMappings(Builder& builder) {
+ builder.
+ Define("--image=_")
+ .WithType<std::vector<std::string>>().AppendValues()
+ .IntoKey(M::ImageFilenames)
+ .Define("--image-classes=_")
+ .WithType<std::string>()
+ .IntoKey(M::ImageClasses)
+ .Define("--image-classes-zip=_")
+ .WithType<std::string>()
+ .IntoKey(M::ImageClassesZip)
+ .Define("--base=_")
+ .WithType<std::string>()
+ .IntoKey(M::Base)
+ .Define("--app-image-file=_")
+ .WithType<std::string>()
+ .IntoKey(M::AppImageFile)
+ .Define("--app-image-fd=_")
+ .WithType<int>()
+ .IntoKey(M::AppImageFileFd)
+ .Define("--multi-image")
+ .IntoKey(M::MultiImage)
+ .Define("--dirty-image-objects=_")
+ .WithType<std::string>()
+ .IntoKey(M::DirtyImageObjects)
+ .Define("--image-format=_")
+ .WithType<ImageHeader::StorageMode>()
+ .WithValueMap({{"lz4", ImageHeader::kStorageModeLZ4},
+ {"lz4hc", ImageHeader::kStorageModeLZ4HC},
+ {"uncompressed", ImageHeader::kStorageModeUncompressed}})
+ .IntoKey(M::ImageFormat);
+}
+
+static void AddSwapMappings(Builder& builder) {
+ builder.
+ Define("--swap-file=_")
+ .WithType<std::string>()
+ .IntoKey(M::SwapFile)
+ .Define("--swap-fd=_")
+ .WithType<int>()
+ .IntoKey(M::SwapFileFd)
+ .Define("--swap-dex-size-threshold=_")
+ .WithType<unsigned int>()
+ .IntoKey(M::SwapDexSizeThreshold)
+ .Define("--swap-dex-count-threshold=_")
+ .WithType<unsigned int>()
+ .IntoKey(M::SwapDexCountThreshold);
+}
+
+static void AddCompilerMappings(Builder& builder) {
+ builder.
+ Define("--compiled-classes=_")
+ .WithType<std::string>()
+ .IntoKey(M::CompiledClasses)
+ .Define("--compiled-classes-zip=_")
+ .WithType<std::string>()
+ .IntoKey(M::CompiledClassesZip)
+ .Define("--compiled-methods=_")
+ .WithType<std::string>()
+ .IntoKey(M::CompiledMethods)
+ .Define("--compiled-methods-zip=_")
+ .WithType<std::string>()
+ .IntoKey(M::CompiledMethodsZip)
+ .Define("--run-passes=_")
+ .WithType<std::string>()
+ .IntoKey(M::Passes)
+ .Define("--profile-file=_")
+ .WithType<std::string>()
+ .IntoKey(M::Profile)
+ .Define("--profile-file-fd=_")
+ .WithType<int>()
+ .IntoKey(M::ProfileFd)
+ .Define("--no-inline-from=_")
+ .WithType<std::string>()
+ .IntoKey(M::NoInlineFrom);
+}
+
+static void AddTargetMappings(Builder& builder) {
+ builder.
+ Define("--instruction-set=_")
+ .WithType<InstructionSet>()
+ .IntoKey(M::TargetInstructionSet)
+ .Define("--instruction-set-variant=_")
+ .WithType<std::string>()
+ .IntoKey(M::TargetInstructionSetVariant)
+ .Define("--instruction-set-features=_")
+ .WithType<std::string>()
+ .IntoKey(M::TargetInstructionSetFeatures);
+}
+
+static Parser CreateArgumentParser() {
+ std::unique_ptr<Builder> parser_builder = std::unique_ptr<Builder>(new Builder());
+
+ AddInputMappings(*parser_builder);
+ AddGeneratedArtifactMappings(*parser_builder);
+ AddImageMappings(*parser_builder);
+ AddSwapMappings(*parser_builder);
+ AddCompilerMappings(*parser_builder);
+ AddTargetMappings(*parser_builder);
+
+ parser_builder->
+ Define({"--watch-dog", "--no-watch-dog"})
+ .WithValues({true, false})
+ .IntoKey(M::Watchdog)
+ .Define("--watchdog-timeout=_")
+ .WithType<int>()
+ .IntoKey(M::WatchdogTimeout)
+ .Define("-j_")
+ .WithType<unsigned int>()
+ .IntoKey(M::Threads)
+ .Define("--android-root=_")
+ .WithType<std::string>()
+ .IntoKey(M::AndroidRoot)
+ .Define("--compiler-backend=_")
+ .WithType<Compiler::Kind>()
+ .WithValueMap({{"Quick", Compiler::Kind::kQuick},
+ {"Optimizing", Compiler::Kind::kOptimizing}})
+ .IntoKey(M::Backend)
+ .Define("--host")
+ .IntoKey(M::Host)
+ .Define("--dump-timing")
+ .IntoKey(M::DumpTiming)
+ .Define("--dump-passes")
+ .IntoKey(M::DumpPasses)
+ .Define("--dump-stats")
+ .IntoKey(M::DumpStats)
+ .Define("--avoid-storing-invocation")
+ .IntoKey(M::AvoidStoringInvocation)
+ .Define("--very-large-app-threshold=_")
+ .WithType<unsigned int>()
+ .IntoKey(M::VeryLargeAppThreshold)
+ .Define("--force-determinism")
+ .IntoKey(M::ForceDeterminism)
+ .Define("--classpath-dir=_")
+ .WithType<std::string>()
+ .IntoKey(M::ClasspathDir)
+ .Define("--class-loader-context=_")
+ .WithType<std::string>()
+ .IntoKey(M::ClassLoaderContext)
+ .Define("--runtime-arg _")
+ .WithType<std::vector<std::string>>().AppendValues()
+ .IntoKey(M::RuntimeOptions);
+
+ AddCompilerOptionsArgumentParserOptions<Dex2oatArgumentMap>(*parser_builder);
+
+ parser_builder->IgnoreUnrecognized(false);
+
+ return parser_builder->Build();
+}
+
+#pragma GCC diagnostic pop
+
+std::unique_ptr<Dex2oatArgumentMap> Dex2oatArgumentMap::Parse(int argc,
+ const char** argv,
+ std::string* error_msg) {
+ Parser parser = CreateArgumentParser();
+ CmdlineResult parse_result = parser.Parse(argv, argc);
+ if (!parse_result.IsSuccess()) {
+ *error_msg = parse_result.GetMessage();
+ return nullptr;
+ }
+
+ return std::unique_ptr<Dex2oatArgumentMap>(new Dex2oatArgumentMap(parser.ReleaseArgumentsMap()));
+}
+
+} // namespace art
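A minimal sketch of how the new entry point is meant to be driven (the flag values are made up and error handling is reduced to a log; not part of the change itself):

```cpp
const char* argv[] = {"dex2oat", "--zip-fd=7", "--oat-location=/tmp/out.oat"};
std::string error_msg;
std::unique_ptr<Dex2oatArgumentMap> args =
    Dex2oatArgumentMap::Parse(static_cast<int>(arraysize(argv)), argv, &error_msg);
if (args == nullptr) {
  LOG(ERROR) << error_msg;
} else if (int* zip_fd = args->Get(Dex2oatArgumentMap::ZipFd)) {
  // Get() returns a pointer that is null when the flag was absent.
  CHECK_NE(*zip_fd, -1);
}
```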
diff --git a/dex2oat/dex2oat_options.def b/dex2oat/dex2oat_options.def
new file mode 100644
index 0000000..83a3035
--- /dev/null
+++ b/dex2oat/dex2oat_options.def
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DEX2OAT_OPTIONS_KEY
+#error "Please #define DEX2OAT_OPTIONS_KEY before #including this file"
+#define DEX2OAT_OPTIONS_KEY(...) // Don't display errors in this file in IDEs.
+#endif
+
+// This file defines the list of keys for Dex2oatArgumentMap.
+// These can be used with Dex2oatArgumentMap.Get/Set/etc, for example:
+// Dex2oatArgumentMap map; int* zip_fd = map.Get(Dex2oatArgumentMap::ZipFd);
+//
+// Column Descriptions:
+// <<Type>> <<Key Name>> <<Default Value>>
+//
+// Default values are only used by Map::GetOrDefault(K<T>).
+// If a default value is omitted here, T{} is used as the default value, which is
+// almost-always the value of the type as if it was memset to all 0.
+//
+// Please keep the columns aligned if possible when adding new rows.
+//
+
+// Parse-able keys from the command line.
+DEX2OAT_OPTIONS_KEY (std::vector<std::string>, DexFiles)
+DEX2OAT_OPTIONS_KEY (std::vector<std::string>, DexLocations)
+DEX2OAT_OPTIONS_KEY (int, ZipFd)
+DEX2OAT_OPTIONS_KEY (std::string, ZipLocation)
+DEX2OAT_OPTIONS_KEY (int, InputVdexFd)
+DEX2OAT_OPTIONS_KEY (std::string, InputVdex)
+DEX2OAT_OPTIONS_KEY (int, OutputVdexFd)
+DEX2OAT_OPTIONS_KEY (std::string, OutputVdex)
+DEX2OAT_OPTIONS_KEY (std::vector<std::string>, OatFiles)
+DEX2OAT_OPTIONS_KEY (std::vector<std::string>, OatSymbols)
+DEX2OAT_OPTIONS_KEY (int, OatFd)
+DEX2OAT_OPTIONS_KEY (std::string, OatLocation)
+DEX2OAT_OPTIONS_KEY (bool, Watchdog)
+DEX2OAT_OPTIONS_KEY (int, WatchdogTimeout)
+DEX2OAT_OPTIONS_KEY (unsigned int, Threads)
+DEX2OAT_OPTIONS_KEY (std::vector<std::string>, ImageFilenames)
+DEX2OAT_OPTIONS_KEY (std::string, ImageClasses)
+DEX2OAT_OPTIONS_KEY (std::string, ImageClassesZip)
+DEX2OAT_OPTIONS_KEY (ImageHeader::StorageMode, ImageFormat)
+DEX2OAT_OPTIONS_KEY (std::string, CompiledClasses)
+DEX2OAT_OPTIONS_KEY (std::string, CompiledClassesZip)
+DEX2OAT_OPTIONS_KEY (std::string, CompiledMethods)
+DEX2OAT_OPTIONS_KEY (std::string, CompiledMethodsZip)
+DEX2OAT_OPTIONS_KEY (std::string, Passes)
+DEX2OAT_OPTIONS_KEY (std::string, Base) // TODO: Hex string parsing.
+DEX2OAT_OPTIONS_KEY (std::string, BootImage)
+DEX2OAT_OPTIONS_KEY (std::string, AndroidRoot)
+DEX2OAT_OPTIONS_KEY (InstructionSet, TargetInstructionSet)
+DEX2OAT_OPTIONS_KEY (std::string, TargetInstructionSetVariant)
+DEX2OAT_OPTIONS_KEY (std::string, TargetInstructionSetFeatures)
+DEX2OAT_OPTIONS_KEY (Compiler::Kind, Backend)
+DEX2OAT_OPTIONS_KEY (std::string, Profile)
+DEX2OAT_OPTIONS_KEY (int, ProfileFd)
+DEX2OAT_OPTIONS_KEY (Unit, Host)
+DEX2OAT_OPTIONS_KEY (Unit, DumpTiming)
+DEX2OAT_OPTIONS_KEY (Unit, DumpPasses)
+DEX2OAT_OPTIONS_KEY (Unit, DumpStats)
+DEX2OAT_OPTIONS_KEY (Unit, AvoidStoringInvocation)
+DEX2OAT_OPTIONS_KEY (std::string, SwapFile)
+DEX2OAT_OPTIONS_KEY (int, SwapFileFd)
+DEX2OAT_OPTIONS_KEY (unsigned int, SwapDexSizeThreshold)
+DEX2OAT_OPTIONS_KEY (unsigned int, SwapDexCountThreshold)
+DEX2OAT_OPTIONS_KEY (unsigned int, VeryLargeAppThreshold)
+DEX2OAT_OPTIONS_KEY (std::string, AppImageFile)
+DEX2OAT_OPTIONS_KEY (int, AppImageFileFd)
+DEX2OAT_OPTIONS_KEY (Unit, MultiImage)
+DEX2OAT_OPTIONS_KEY (std::string, NoInlineFrom)
+DEX2OAT_OPTIONS_KEY (Unit, ForceDeterminism)
+DEX2OAT_OPTIONS_KEY (std::string, ClasspathDir)
+DEX2OAT_OPTIONS_KEY (std::string, ClassLoaderContext)
+DEX2OAT_OPTIONS_KEY (std::string, DirtyImageObjects)
+DEX2OAT_OPTIONS_KEY (std::vector<std::string>, RuntimeOptions)
+
+#undef DEX2OAT_OPTIONS_KEY
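For readers new to the X-macro pattern used here: each includer defines DEX2OAT_OPTIONS_KEY before including this file, and every row expands under that definition. Illustratively, the row `DEX2OAT_OPTIONS_KEY (int, ZipFd)` expands to roughly:

```cpp
// Under the declaration macro in dex2oat_options.h:
static const Key<int> ZipFd;

// Under the storage-definition macro in dex2oat_options.cc
// (no extra arguments in the row, so the default value is int{}):
const Dex2oatArgumentMap::Key<int> Dex2oatArgumentMap::ZipFd {};
```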
diff --git a/dex2oat/dex2oat_options.h b/dex2oat/dex2oat_options.h
new file mode 100644
index 0000000..a4c7186
--- /dev/null
+++ b/dex2oat/dex2oat_options.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_DEX2OAT_DEX2OAT_OPTIONS_H_
+#define ART_DEX2OAT_DEX2OAT_OPTIONS_H_
+
+#include <cstdio>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/variant_map.h"
+#include "cmdline_types.h" // TODO: don't need to include this file here
+#include "compiler.h"
+#include "driver/compiler_options_map.h"
+#include "image.h"
+
+namespace art {
+
+template <typename TVariantMap,
+ template <typename TKeyValue> class TVariantMapKey>
+struct CmdlineParser;
+
+// Define a key that is usable with a Dex2oatArgumentMap.
+// This key will *not* work with other subtypes of VariantMap.
+template <typename TValue>
+struct Dex2oatArgumentMapKey : VariantMapKey<TValue> {
+ Dex2oatArgumentMapKey() {}
+ explicit Dex2oatArgumentMapKey(TValue default_value)
+ : VariantMapKey<TValue>(std::move(default_value)) {}
+ // Don't ODR-use constexpr default values, which means that Struct::Fields
+ // that are declared 'static constexpr T Name = Value' don't need to have a matching definition.
+};
+
+// Defines a type-safe heterogeneous key->value map.
+// Use the VariantMap interface to look up or to store a Dex2oatArgumentMapKey,Value pair.
+//
+// Example:
+// auto map = Dex2oatArgumentMap();
+// map.Set(Dex2oatArgumentMap::ZipFd, -1);
+// int* zip_fd = map.Get(Dex2oatArgumentMap::ZipFd);
+//
+struct Dex2oatArgumentMap : CompilerOptionsMap<Dex2oatArgumentMap, Dex2oatArgumentMapKey> {
+ // This 'using' line is necessary to inherit the variadic constructor.
+ using CompilerOptionsMap<Dex2oatArgumentMap, Dex2oatArgumentMapKey>::CompilerOptionsMap;
+
+ static std::unique_ptr<Dex2oatArgumentMap> Parse(int argc,
+ const char** argv,
+ std::string* error_msg);
+
+ // Make the next many usages of Key slightly shorter to type.
+ template <typename TValue>
+ using Key = Dex2oatArgumentMapKey<TValue>;
+
+ // List of key declarations, shorthand for 'static const Key<T> Name'
+#define DEX2OAT_OPTIONS_KEY(Type, Name, ...) static const Key<Type> (Name);
+#include "dex2oat_options.def"
+};
+
+extern template struct CompilerOptionsMap<Dex2oatArgumentMap, Dex2oatArgumentMapKey>;
+
+} // namespace art
+
+#endif // ART_DEX2OAT_DEX2OAT_OPTIONS_H_
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index d89d9f0..50434ef 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -48,16 +48,6 @@
namespace art {
namespace linker {
-NO_RETURN static void Usage(const char* fmt, ...) {
- va_list ap;
- va_start(ap, fmt);
- std::string error;
- android::base::StringAppendV(&error, fmt, ap);
- LOG(FATAL) << error;
- va_end(ap);
- UNREACHABLE();
-}
-
class OatTest : public CommonCompilerTest {
protected:
static const bool kCompile = false; // DISABLED_ due to the time to compile libcore
@@ -101,8 +91,11 @@
insn_features_ = InstructionSetFeatures::FromVariant(insn_set, "default", error_msg);
ASSERT_TRUE(insn_features_ != nullptr) << *error_msg;
compiler_options_.reset(new CompilerOptions);
- for (const std::string& option : compiler_options) {
- compiler_options_->ParseCompilerOption(option, Usage);
+ if (!compiler_options_->ParseCompilerOptions(compiler_options,
+ false /* ignore_unrecognized */,
+ error_msg)) {
+ LOG(FATAL) << *error_msg;
+ UNREACHABLE();
}
verification_results_.reset(new VerificationResults(compiler_options_.get()));
callbacks_.reset(new QuickCompilerCallbacks(CompilerCallbacks::CallbackMode::kCompileApp));
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index 5d9bf2c..98fad80 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -1096,15 +1096,19 @@
hs.NewHandle(GetClassLoader()),
dex_file_->GetClassDef(0), /*class_def*/
nullptr, /*compiler_callbacks*/
- false, /*allow_soft_failures*/
+ true, /*allow_soft_failures*/
/*log_level*/
art::verifier::HardFailLogMode::kLogWarning,
&error);
- bool passes = failure == art::verifier::FailureKind::kNoFailure;
- if (!passes) {
- RecordFailure(ERR(FAILS_VERIFICATION), "Failed to verify class. Error was: " + error);
+ switch (failure) {
+ case art::verifier::FailureKind::kNoFailure:
+ case art::verifier::FailureKind::kSoftFailure:
+ return true;
+ case art::verifier::FailureKind::kHardFailure: {
+ RecordFailure(ERR(FAILS_VERIFICATION), "Failed to verify class. Error was: " + error);
+ return false;
+ }
}
- return passes;
}
// Looks through the previously allocated cookies to see if we need to update them with another new
@@ -1399,7 +1403,9 @@
method.SetNotIntrinsic();
// Notify the jit that this method is redefined.
art::jit::Jit* jit = driver_->runtime_->GetJit();
- if (jit != nullptr) {
+ // Non-invokable methods don't have any JIT data associated with them so we don't need to tell
+ // the jit about them.
+ if (jit != nullptr && method.IsInvokable()) {
jit->GetCodeCache()->NotifyMethodRedefined(&method);
}
}
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 711bc65..ea776e7 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -530,6 +530,7 @@
"barrier_test.cc",
"base/arena_allocator_test.cc",
"base/bit_field_test.cc",
+ "base/bit_struct_test.cc",
"base/bit_utils_test.cc",
"base/bit_vector_test.cc",
"base/hash_set_test.cc",
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index 8738adf..c48e30f 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -149,7 +149,10 @@
os << "===== Allocation by kind\n";
static_assert(arraysize(kAllocNames) == kNumArenaAllocKinds, "arraysize of kAllocNames");
for (int i = 0; i < kNumArenaAllocKinds; i++) {
+ // Reduce output by listing only allocation kinds that actually have allocations.
+ if (alloc_stats_[i] != 0u) {
os << kAllocNames[i] << std::setw(10) << alloc_stats_[i] << "\n";
+ }
}
}
diff --git a/runtime/base/arena_allocator_test.cc b/runtime/base/arena_allocator_test.cc
index e2c2e2f..6bf56c8 100644
--- a/runtime/base/arena_allocator_test.cc
+++ b/runtime/base/arena_allocator_test.cc
@@ -23,9 +23,9 @@
class ArenaAllocatorTest : public testing::Test {
protected:
- size_t NumberOfArenas(ArenaAllocator* arena) {
+ size_t NumberOfArenas(ArenaAllocator* allocator) {
size_t result = 0u;
- for (Arena* a = arena->arena_head_; a != nullptr; a = a->next_) {
+ for (Arena* a = allocator->arena_head_; a != nullptr; a = a->next_) {
++result;
}
return result;
diff --git a/runtime/base/arena_containers.h b/runtime/base/arena_containers.h
index 62b974e..2e71156 100644
--- a/runtime/base/arena_containers.h
+++ b/runtime/base/arena_containers.h
@@ -137,22 +137,22 @@
typedef ArenaAllocatorAdapter<U> other;
};
- explicit ArenaAllocatorAdapter(ArenaAllocator* arena_allocator,
+ explicit ArenaAllocatorAdapter(ArenaAllocator* allocator,
ArenaAllocKind kind = kArenaAllocSTL)
: ArenaAllocatorAdapterKind(kind),
- arena_allocator_(arena_allocator) {
+ allocator_(allocator) {
}
template <typename U>
ArenaAllocatorAdapter(const ArenaAllocatorAdapter<U>& other) // NOLINT, implicit
: ArenaAllocatorAdapterKind(other),
- arena_allocator_(other.arena_allocator_) {
+ allocator_(other.allocator_) {
}
ArenaAllocatorAdapter(const ArenaAllocatorAdapter&) = default;
ArenaAllocatorAdapter& operator=(const ArenaAllocatorAdapter&) = default;
~ArenaAllocatorAdapter() = default;
private:
- ArenaAllocator* arena_allocator_;
+ ArenaAllocator* allocator_;
template <typename U>
friend class ArenaAllocatorAdapter;
@@ -174,14 +174,14 @@
typedef ArenaAllocatorAdapter<U> other;
};
- ArenaAllocatorAdapter(ArenaAllocator* arena_allocator, ArenaAllocKind kind)
+ ArenaAllocatorAdapter(ArenaAllocator* allocator, ArenaAllocKind kind)
: ArenaAllocatorAdapterKind(kind),
- arena_allocator_(arena_allocator) {
+ allocator_(allocator) {
}
template <typename U>
ArenaAllocatorAdapter(const ArenaAllocatorAdapter<U>& other) // NOLINT, implicit
: ArenaAllocatorAdapterKind(other),
- arena_allocator_(other.arena_allocator_) {
+ allocator_(other.allocator_) {
}
ArenaAllocatorAdapter(const ArenaAllocatorAdapter&) = default;
ArenaAllocatorAdapter& operator=(const ArenaAllocatorAdapter&) = default;
@@ -197,10 +197,10 @@
pointer allocate(size_type n,
ArenaAllocatorAdapter<void>::pointer hint ATTRIBUTE_UNUSED = nullptr) {
DCHECK_LE(n, max_size());
- return arena_allocator_->AllocArray<T>(n, ArenaAllocatorAdapterKind::Kind());
+ return allocator_->AllocArray<T>(n, ArenaAllocatorAdapterKind::Kind());
}
void deallocate(pointer p, size_type n) {
- arena_allocator_->MakeInaccessible(p, sizeof(T) * n);
+ allocator_->MakeInaccessible(p, sizeof(T) * n);
}
template <typename U, typename... Args>
@@ -213,7 +213,7 @@
}
private:
- ArenaAllocator* arena_allocator_;
+ ArenaAllocator* allocator_;
template <typename U>
friend class ArenaAllocatorAdapter;
@@ -226,7 +226,7 @@
template <typename T>
inline bool operator==(const ArenaAllocatorAdapter<T>& lhs,
const ArenaAllocatorAdapter<T>& rhs) {
- return lhs.arena_allocator_ == rhs.arena_allocator_;
+ return lhs.allocator_ == rhs.allocator_;
}
template <typename T>
diff --git a/runtime/base/arena_object.h b/runtime/base/arena_object.h
index 2d8e7d8..ed00bab 100644
--- a/runtime/base/arena_object.h
+++ b/runtime/base/arena_object.h
@@ -32,8 +32,8 @@
return allocator->Alloc(size, kAllocKind);
}
- static void* operator new(size_t size, ScopedArenaAllocator* arena) {
- return arena->Alloc(size, kAllocKind);
+ static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
+ return allocator->Alloc(size, kAllocKind);
}
void operator delete(void*, size_t) {
@@ -56,8 +56,8 @@
return allocator->Alloc(size, kAllocKind);
}
- static void* operator new(size_t size, ScopedArenaAllocator* arena) {
- return arena->Alloc(size, kAllocKind);
+ static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
+ return allocator->Alloc(size, kAllocKind);
}
void operator delete(void*, size_t) {
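The renamed parameter above is the placement-new hook that arena-resident objects use. A minimal sketch of the pattern (the Node class is hypothetical):

```cpp
// Hypothetical arena-resident type; ArenaObject supplies the placement
// operator new shown above, so instances are never individually deleted.
class Node : public ArenaObject<kArenaAllocMisc> {
 public:
  explicit Node(int value) : value_(value) {}
  int value() const { return value_; }

 private:
  const int value_;
};

void BuildInArena(ScopedArenaAllocator* allocator) {
  Node* node = new (allocator) Node(42);  // memory comes from the scoped arena
  CHECK_EQ(node->value(), 42);
}
```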
diff --git a/runtime/base/bit_struct.h b/runtime/base/bit_struct.h
new file mode 100644
index 0000000..1f86ee1
--- /dev/null
+++ b/runtime/base/bit_struct.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_BIT_STRUCT_H_
+#define ART_RUNTIME_BASE_BIT_STRUCT_H_
+
+#include "bit_struct_detail.h"
+#include "bit_utils.h"
+
+//
+// Zero-cost, type-safe, well-defined "structs" of bit fields.
+//
+// ---------------------------------------------
+// Usage example:
+// ---------------------------------------------
+//
+// // Definition for type 'Example'
+// BITSTRUCT_DEFINE_START(Example, 10)
+// BitStructUint<0, 2> u2; // Every field must be a BitStruct[*].
+// BitStructInt<2, 7> i7;
+// BitStructUint<9, 1> i1;
+// BITSTRUCT_DEFINE_END(Example);
+//
+// Would define a bit struct with this layout:
+// <- 1 -> <-- 7 --> <- 2 ->
+// +--------+---------------+-----+
+// | i1 | i7 | u2 +
+// +--------+---------------+-----+
+// 10 9 2 0
+//
+// // Read-write just like regular values.
+// Example ex;
+// ex.u2 = 3;
+// ex.i7 = -25;
+// ex.i1 = true;
+// size_t u2 = ex.u2;
+// int i7 = ex.i7;
+// bool i1 = ex.i1;
+//
+// // It's packed down to the smallest # of machine words.
+// assert(sizeof(Example) == 2);
+// // The exact bit pattern is well-defined by the template parameters.
+// uint16_t cast = *reinterpret_cast<uint16_t*>(&ex);
+// assert(cast == ((3) | (0b1100111 << 2) | (true << 9)));
+//
+// ---------------------------------------------
+// Why not just use C++ bitfields?
+// ---------------------------------------------
+//
+// The layout is implementation-defined.
+// We do not know whether the fields are packed left-to-right or
+// right-to-left, so it makes it useless when the memory layout needs to be
+// precisely controlled.
+//
+// ---------------------------------------------
+// More info:
+// ---------------------------------------------
+// Currently uintmax_t is the largest supported underlying storage type,
+// all (kBitOffset + kBitWidth) must fit into BitSizeOf<uintmax_t>();
+//
+// Using BitStruct[U]int will automatically select an underlying type
+// that's the smallest to fit your (offset + bitwidth).
+//
+// BitStructNumber can be used to manually select an underlying type.
+//
+// BitStructField can be used with custom standard-layout structs,
+// thus allowing for arbitrary nesting of bit structs.
+//
+namespace art {
+// Zero-cost wrapper around a struct 'T', allowing it to be stored as a bitfield
+// at offset 'kBitOffset' and width 'kBitWidth'.
+// The storage is plain unsigned int, whose size is the smallest required to fit
+// 'kBitOffset + kBitWidth'. All operations to this become BitFieldExtract/BitFieldInsert
+// operations to the underlying uint.
+//
+// Field memory representation:
+//
+// MSB <-- width --> LSB
+// +--------+------------+--------+
+// | ?????? | u bitfield | ?????? +
+// +--------+------------+--------+
+// offset 0
+//
+// Reading/writing the bitfield (un)packs it into a temporary T:
+//
+// MSB <-- width --> LSB
+// +-----------------+------------+
+// | 0.............0 | T bitfield |
+// +-----------------+------------+
+// 0
+//
+// It's the responsibility of the StorageType to ensure the bit representation
+// of T can be represented by kBitWidth.
+template <typename T,
+ size_t kBitOffset,
+ size_t kBitWidth = BitStructSizeOf<T>(),
+ typename StorageType = typename detail::MinimumTypeUnsignedHelper<kBitOffset + kBitWidth>::type>
+struct BitStructField {
+ static_assert(std::is_standard_layout<T>::value, "T must be standard layout");
+
+ operator T() const {
+ return Get();
+ }
+
+ // Exclude overload when T==StorageType.
+ template <typename _ = void,
+ typename = std::enable_if_t<!std::is_same<T, StorageType>::value, _>>
+ explicit operator StorageType() const {
+ return GetStorage();
+ }
+
+ BitStructField& operator=(T value) {
+ return Assign(*this, value);
+ }
+
+ static constexpr size_t BitStructSizeOf() {
+ return kBitWidth;
+ }
+
+ protected:
+ template <typename T2>
+ T2& Assign(T2& what, T value) {
+ // Since C++ doesn't allow the type of operator= to change out
+ // in the subclass, reimplement operator= in each subclass
+ // manually and call this helper function.
+ static_assert(std::is_base_of<BitStructField, T2>::value, "T2 must inherit BitStructField");
+ what.Set(value);
+ return what;
+ }
+
+ T Get() const {
+ ValueStorage vs;
+ vs.pod_.val_ = GetStorage();
+ return vs.value_;
+ }
+
+ void Set(T value) {
+ ValueStorage value_as_storage;
+ value_as_storage.value_ = value;
+
+ storage_.pod_.val_ = BitFieldInsert(storage_.pod_.val_,
+ value_as_storage.pod_.val_,
+ kBitOffset,
+ kBitWidth);
+ }
+
+ private:
+ StorageType GetStorage() const {
+ return BitFieldExtract(storage_.pod_.val_, kBitOffset, kBitWidth);
+ }
+
+ // Underlying value must be wrapped in a separate standard-layout struct.
+ // See below for more details.
+ struct PodWrapper {
+ StorageType val_;
+ };
+
+ union ValueStorage {
+ // Safely alias pod_ and value_ together.
+ //
+ // See C++ 9.5.1 [class.union]:
+ // If a standard-layout union contains several standard-layout structs that share a common
+ // initial sequence ... it is permitted to inspect the common initial sequence of any of
+ // standard-layout struct members.
+ PodWrapper pod_;
+ T value_;
+ } storage_;
+
+ // Future work: In theory almost non-standard layout can be supported here,
+ // assuming they don't rely on the address of (this).
+ // We just have to use memcpy since the union-aliasing would not work.
+};
+
+// Base class for number-like BitStruct fields.
+// T is the type to store in as a bit field.
+// kBitOffset, kBitWidth define the position and length of the bitfield.
+//
+// (Common usage should be BitStructInt, BitStructUint -- this
+// intermediate template allows a user-defined integer to be used.)
+template <typename T, size_t kBitOffset, size_t kBitWidth>
+struct BitStructNumber : public BitStructField<T, kBitOffset, kBitWidth, /*StorageType*/T> {
+ using StorageType = T;
+
+ BitStructNumber& operator=(T value) {
+ return BaseType::Assign(*this, value);
+ }
+
+ /*implicit*/ operator T() const {
+ return Get();
+ }
+
+ explicit operator bool() const {
+ return static_cast<bool>(Get());
+ }
+
+ BitStructNumber& operator++() {
+ *this = Get() + 1u;
+ return *this;
+ }
+
+ StorageType operator++(int) {
+ // Note: returns the incremented value but does not store it back into the
+ // bitfield (the tests below rely on this behavior).
+ return Get() + 1u;
+ }
+
+ BitStructNumber& operator--() {
+ *this = Get() - 1u;
+ return *this;
+ }
+
+ StorageType operator--(int) {
+ // Note: likewise returns the decremented value without storing it back.
+ return Get() - 1u;
+ }
+
+ private:
+ using BaseType = BitStructField<T, kBitOffset, kBitWidth, /*StorageType*/T>;
+ using BaseType::Get;
+};
+
+// Create a BitStruct field which uses the smallest underlying int storage type,
+// in order to be large enough to fit (kBitOffset + kBitWidth).
+//
+// Values are sign-extended when they are read out.
+template <size_t kBitOffset, size_t kBitWidth>
+using BitStructInt =
+ BitStructNumber<typename detail::MinimumTypeHelper<int, kBitOffset + kBitWidth>::type,
+ kBitOffset,
+ kBitWidth>;
+
+// Create a BitStruct field which uses the smallest underlying uint storage type,
+// in order to be large enough to fit (kBitOffset + kBitWidth).
+//
+// Values are zero-extended when they are read out.
+template <size_t kBitOffset, size_t kBitWidth>
+using BitStructUint =
+ BitStructNumber<typename detail::MinimumTypeHelper<unsigned int, kBitOffset + kBitWidth>::type,
+ kBitOffset,
+ kBitWidth>;
+
+// Start a definition for a bitstruct.
+// A bitstruct is defined to be a union with a common initial subsequence
+// that we call 'DefineBitStructSize<bitwidth>'.
+//
+// See top of file for usage example.
+//
+// This marker is required by the C++ standard in order to
+// have a "common initial sequence".
+//
+// See C++ 9.5.1 [class.union]:
+// If a standard-layout union contains several standard-layout structs that share a common
+// initial sequence ... it is permitted to inspect the common initial sequence of any of
+// standard-layout struct members.
+#define BITSTRUCT_DEFINE_START(name, bitwidth) \
+ union name { \
+ art::detail::DefineBitStructSize<(bitwidth)> _; \
+ static constexpr size_t BitStructSizeOf() { return (bitwidth); }
+
+// End the definition of a bitstruct, and insert a sanity check
+// to ensure that the bitstruct did not exceed the specified size.
+//
+// See top of file for usage example.
+#define BITSTRUCT_DEFINE_END(name) \
+ }; /* NOLINT [readability/braces] [4] */ \
+ static_assert(art::detail::ValidateBitStructSize<name>(), \
+ #name "bitsize incorrect: " \
+ "did you insert extra fields that weren't BitStructX, " \
+ "and does the size match the sum of the field widths?")
+
+// Determine the minimal bit size for a user-defined type T.
+// Used by BitStructField to determine how small a custom type is.
+template <typename T>
+static constexpr size_t BitStructSizeOf() {
+ return T::BitStructSizeOf();
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_BIT_STRUCT_H_
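Worth noting: the static_assert emitted by BITSTRUCT_DEFINE_END also catches stray non-bitfield members, because they grow sizeof past the declared width. An intentionally non-compiling sketch (not from this change):

```cpp
// BITSTRUCT_DEFINE_START(Broken, 8)
//   BitStructUint</*lsb*/0, /*width*/4> nibble;
//   uint32_t oops;  // widens the union to 4 bytes...
// BITSTRUCT_DEFINE_END(Broken);  // ...so ValidateBitStructSize<Broken>() fails here.
```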
diff --git a/runtime/base/bit_struct_detail.h b/runtime/base/bit_struct_detail.h
new file mode 100644
index 0000000..9f629c0
--- /dev/null
+++ b/runtime/base/bit_struct_detail.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_BIT_STRUCT_DETAIL_H_
+#define ART_RUNTIME_BASE_BIT_STRUCT_DETAIL_H_
+
+#include "bit_utils.h"
+#include "globals.h"
+
+#include <type_traits>
+
+// Implementation details for bit_struct.h
+// Not intended to be used stand-alone.
+
+namespace art {
+
+template <typename T>
+static constexpr size_t BitStructSizeOf();
+
+namespace detail {
+ // Select the smallest uintX_t that will fit kBitSize bits.
+ template <size_t kBitSize>
+ struct MinimumTypeUnsignedHelper {
+ using type =
+ typename std::conditional<kBitSize == 0, void,
+ typename std::conditional<kBitSize <= 8, uint8_t,
+ typename std::conditional<kBitSize <= 16, uint16_t,
+ typename std::conditional<kBitSize <= 32, uint32_t,
+ typename std::conditional<kBitSize <= 64, uint64_t,
+ typename std::conditional<kBitSize <= BitSizeOf<uintmax_t>(), uintmax_t,
+ void>::type>::type>::type>::type>::type>::type;
+ };
+
+ // Select the smallest [u]intX_t that will fit kBitSize bits.
+ // Automatically picks intX_t or uintX_t based on the sign-ness of T.
+ template <typename T, size_t kBitSize>
+ struct MinimumTypeHelper {
+ using type_unsigned = typename MinimumTypeUnsignedHelper<kBitSize>::type;
+
+ using type =
+ typename std::conditional</* if */ std::is_signed<T>::value,
+ /* then */ typename std::make_signed<type_unsigned>::type,
+ /* else */ type_unsigned>::type;
+ };
+
+ // Ensure the minimal type storage for 'T' matches its declared BitStructSizeOf.
+ // Nominally used by the BITSTRUCT_DEFINE_END macro.
+ template <typename T>
+ static constexpr bool ValidateBitStructSize() {
+ const size_t kBitStructSizeOf = BitStructSizeOf<T>();
+ const size_t kExpectedSize = (BitStructSizeOf<T>() < kBitsPerByte)
+ ? kBitsPerByte
+ : RoundUpToPowerOfTwo(kBitStructSizeOf);
+
+ // Ensure no extra fields were added in between START/END.
+ const size_t kActualSize = sizeof(T) * kBitsPerByte;
+ return kExpectedSize == kActualSize;
+ }
+
+ // Denotes the beginning of a bit struct.
+ //
+ // This marker is required by the C++ standard in order to
+ // have a "common initial sequence".
+ //
+ // See C++ 9.5.1 [class.union]:
+ // If a standard-layout union contains several standard-layout structs that share a common
+ // initial sequence ... it is permitted to inspect the common initial sequence of any of
+ // standard-layout struct members.
+ template <size_t kSize>
+ struct DefineBitStructSize {
+ private:
+ typename MinimumTypeUnsignedHelper<kSize>::type _;
+ };
+} // namespace detail
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_BIT_STRUCT_DETAIL_H_
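To make the type selection concrete, these illustrative assertions (not part of the change) hold under the helpers above:

```cpp
#include <cstdint>
#include <type_traits>

static_assert(std::is_same<art::detail::MinimumTypeUnsignedHelper<7>::type, uint8_t>::value,
              "7 bits fit in a uint8_t");
static_assert(std::is_same<art::detail::MinimumTypeUnsignedHelper<9>::type, uint16_t>::value,
              "9 bits need a uint16_t");
static_assert(std::is_same<art::detail::MinimumTypeHelper<int, 9>::type, int16_t>::value,
              "a signed T selects the signed variant");
static_assert(std::is_same<art::detail::MinimumTypeHelper<unsigned int, 33>::type, uint64_t>::value,
              "33 bits need a uint64_t");
```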
diff --git a/runtime/base/bit_struct_test.cc b/runtime/base/bit_struct_test.cc
new file mode 100644
index 0000000..872ada3
--- /dev/null
+++ b/runtime/base/bit_struct_test.cc
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "bit_struct.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+// A copy of detail::ValidateBitStructSize that uses EXPECT for a more
+// human-readable message.
+template <typename T>
+static constexpr bool ValidateBitStructSize(const char* name) {
+ const size_t kBitStructSizeOf = BitStructSizeOf<T>();
+ const size_t kExpectedSize = (BitStructSizeOf<T>() < kBitsPerByte)
+ ? kBitsPerByte
+ : RoundUpToPowerOfTwo(kBitStructSizeOf);
+
+ // Ensure no extra fields were added in between START/END.
+ const size_t kActualSize = sizeof(T) * kBitsPerByte;
+ EXPECT_EQ(kExpectedSize, kActualSize) << name;
+ return true;
+}
+
+#define VALIDATE_BITSTRUCT_SIZE(type) ValidateBitStructSize<type>(#type)
+
+TEST(BitStructs, MinimumType) {
+ EXPECT_EQ(1u, sizeof(typename detail::MinimumTypeUnsignedHelper<1>::type));
+ EXPECT_EQ(1u, sizeof(typename detail::MinimumTypeUnsignedHelper<2>::type));
+ EXPECT_EQ(1u, sizeof(typename detail::MinimumTypeUnsignedHelper<3>::type));
+ EXPECT_EQ(1u, sizeof(typename detail::MinimumTypeUnsignedHelper<8>::type));
+ EXPECT_EQ(2u, sizeof(typename detail::MinimumTypeUnsignedHelper<9>::type));
+ EXPECT_EQ(2u, sizeof(typename detail::MinimumTypeUnsignedHelper<10>::type));
+ EXPECT_EQ(2u, sizeof(typename detail::MinimumTypeUnsignedHelper<15>::type));
+ EXPECT_EQ(2u, sizeof(typename detail::MinimumTypeUnsignedHelper<16>::type));
+ EXPECT_EQ(4u, sizeof(typename detail::MinimumTypeUnsignedHelper<17>::type));
+ EXPECT_EQ(4u, sizeof(typename detail::MinimumTypeUnsignedHelper<32>::type));
+ EXPECT_EQ(8u, sizeof(typename detail::MinimumTypeUnsignedHelper<33>::type));
+ EXPECT_EQ(8u, sizeof(typename detail::MinimumTypeUnsignedHelper<64>::type));
+}
+
+template <typename T>
+size_t AsUint(const T& value) {
+ size_t uint_value = 0;
+ memcpy(&uint_value, &value, sizeof(value));
+ return uint_value;
+}
+
+struct CustomBitStruct {
+ CustomBitStruct() = default;
+ explicit CustomBitStruct(int8_t data) : data(data) {}
+
+ static constexpr size_t BitStructSizeOf() {
+ return 4;
+ }
+
+ int8_t data;
+};
+
+template <typename T>
+void ZeroInitialize(T& value) {
+ memset(&value, 0, sizeof(T));
+ // TODO: replace with value initialization
+}
+
+TEST(BitStructs, Custom) {
+ CustomBitStruct expected(0b1111);
+
+ BitStructField<CustomBitStruct, /*lsb*/4, /*width*/4> f;
+ ZeroInitialize(f);
+
+ EXPECT_EQ(1u, sizeof(f));
+
+ f = CustomBitStruct(0b1111);
+
+ CustomBitStruct read_out = f;
+ EXPECT_EQ(read_out.data, 0b1111);
+
+ EXPECT_EQ(AsUint(f), 0b11110000u);
+}
+
+BITSTRUCT_DEFINE_START(TestTwoCustom, /* size */ 8)
+ BitStructField<CustomBitStruct, /*lsb*/0, /*width*/4> f4_a;
+ BitStructField<CustomBitStruct, /*lsb*/4, /*width*/4> f4_b;
+BITSTRUCT_DEFINE_END(TestTwoCustom);
+
+TEST(BitStructs, TwoCustom) {
+ EXPECT_EQ(sizeof(TestTwoCustom), 1u);
+
+ VALIDATE_BITSTRUCT_SIZE(TestTwoCustom);
+
+ TestTwoCustom cst;
+ ZeroInitialize(cst);
+
+ // Test the write to most-significant field doesn't clobber least-significant.
+ cst.f4_a = CustomBitStruct(0b0110);
+ cst.f4_b = CustomBitStruct(0b0101);
+
+ int8_t read_out = static_cast<CustomBitStruct>(cst.f4_a).data;
+ int8_t read_out_b = static_cast<CustomBitStruct>(cst.f4_b).data;
+
+ EXPECT_EQ(0b0110, static_cast<int>(read_out));
+ EXPECT_EQ(0b0101, static_cast<int>(read_out_b));
+
+ EXPECT_EQ(AsUint(cst), 0b01010110u);
+
+ // Test write to least-significant field doesn't clobber most-significant.
+ cst.f4_a = CustomBitStruct(0);
+
+ read_out = static_cast<CustomBitStruct>(cst.f4_a).data;
+ read_out_b = static_cast<CustomBitStruct>(cst.f4_b).data;
+
+ EXPECT_EQ(0b0, static_cast<int>(read_out));
+ EXPECT_EQ(0b0101, static_cast<int>(read_out_b));
+
+ EXPECT_EQ(AsUint(cst), 0b01010000u);
+}
+
+TEST(BitStructs, Number) {
+ BitStructNumber<uint16_t, /*lsb*/4, /*width*/4> bsn;
+ ZeroInitialize(bsn);
+ EXPECT_EQ(2u, sizeof(bsn));
+
+ bsn = 0b1111;
+
+ uint32_t read_out = static_cast<uint32_t>(bsn);
+ uint32_t read_out_impl = bsn;
+
+ EXPECT_EQ(read_out, read_out_impl);
+ EXPECT_EQ(read_out, 0b1111u);
+ EXPECT_EQ(AsUint(bsn), 0b11110000u);
+}
+
+BITSTRUCT_DEFINE_START(TestBitStruct, /* size */ 8)
+ BitStructInt</*lsb*/0, /*width*/3> i3;
+ BitStructUint</*lsb*/3, /*width*/4> u4;
+
+ BitStructUint</*lsb*/0, /*width*/7> alias_all;
+BITSTRUCT_DEFINE_END(TestBitStruct);
+
+TEST(BitStructs, Test1) {
+ {
+ // Check minimal size selection is correct.
+ BitStructInt</*lsb*/0, /*width*/3> i3;
+ BitStructUint</*lsb*/3, /*width*/4> u4;
+
+ BitStructUint</*lsb*/0, /*width*/7> alias_all;
+
+ EXPECT_EQ(1u, sizeof(i3));
+ EXPECT_EQ(1u, sizeof(u4));
+ EXPECT_EQ(1u, sizeof(alias_all));
+ }
+ TestBitStruct tst;
+ ZeroInitialize(tst);
+
+ // Check minimal size selection is correct.
+ EXPECT_EQ(1u, sizeof(TestBitStruct));
+ EXPECT_EQ(1u, sizeof(tst._));
+ EXPECT_EQ(1u, sizeof(tst.i3));
+ EXPECT_EQ(1u, sizeof(tst.u4));
+ EXPECT_EQ(1u, sizeof(tst.alias_all));
+
+ // Check operator assignment.
+ tst.i3 = -1;
+ tst.u4 = 0b1010;
+
+ // Check implicit operator conversion.
+ int8_t read_i3 = tst.i3;
+ uint8_t read_u4 = tst.u4;
+
+ // Ensure read-out values were correct.
+ EXPECT_EQ(static_cast<int8_t>(-1), read_i3);
+ EXPECT_EQ(0b1010, read_u4);
+
+ // Ensure aliasing is working.
+ EXPECT_EQ(0b1010111, static_cast<uint8_t>(tst.alias_all));
+
+ // Ensure the bit pattern is correct.
+ EXPECT_EQ(0b1010111u, AsUint(tst));
+
+ // Math operator checks
+ {
+ // In-place
+ ++tst.u4;
+ EXPECT_EQ(static_cast<uint8_t>(0b1011), static_cast<uint8_t>(tst.u4));
+ --tst.u4;
+ EXPECT_EQ(static_cast<uint8_t>(0b1010), static_cast<uint8_t>(tst.u4));
+
+ // Copy
+ uint8_t read_and_convert = tst.u4++;
+ EXPECT_EQ(static_cast<uint8_t>(0b1011), read_and_convert);
+ EXPECT_EQ(static_cast<uint8_t>(0b1010), static_cast<uint8_t>(tst.u4));
+ read_and_convert = tst.u4--;
+ EXPECT_EQ(static_cast<uint8_t>(0b1001), read_and_convert);
+ EXPECT_EQ(static_cast<uint8_t>(0b1010), static_cast<uint8_t>(tst.u4));
+
+ // Check boolean operator conversion.
+ tst.u4 = 0b1010;
+ EXPECT_TRUE(static_cast<bool>(tst.u4));
+ bool succ = tst.u4 ? true : false;
+ EXPECT_TRUE(succ);
+
+ tst.u4 = 0;
+ EXPECT_FALSE(static_cast<bool>(tst.u4));
+
+/*
+ // Disabled: Overflow is caught by the BitFieldInsert DCHECKs.
+ // Check overflow for uint.
+ tst.u4 = 0b1111;
+ ++tst.u4;
+ EXPECT_EQ(static_cast<uint8_t>(0), static_cast<uint8_t>(tst.u4));
+*/
+ }
+}
+
+BITSTRUCT_DEFINE_START(MixedSizeBitStruct, /* size */ 32)
+ BitStructUint</*lsb*/0, /*width*/3> u3;
+ BitStructUint</*lsb*/3, /*width*/10> u10;
+ BitStructUint</*lsb*/13, /*width*/19> u19;
+
+ BitStructUint</*lsb*/0, /*width*/32> alias_all;
+BITSTRUCT_DEFINE_END(MixedSizeBitStruct);
+
+// static_assert(sizeof(MixedSizeBitStruct) == sizeof(uint32_t), "TestBitStructs#MixedSize");
+
+TEST(BitStructs, Mixed) {
+ EXPECT_EQ(4u, sizeof(MixedSizeBitStruct));
+
+ MixedSizeBitStruct tst;
+ ZeroInitialize(tst);
+
+ // Check operator assignment.
+ tst.u3 = 0b111u;
+ tst.u10 = 0b1111010100u;
+ tst.u19 = 0b1010101010101010101u;
+
+ // Check implicit operator conversion.
+ uint8_t read_u3 = tst.u3;
+ uint16_t read_u10 = tst.u10;
+ uint32_t read_u19 = tst.u19;
+
+ // Ensure read-out values were correct.
+ EXPECT_EQ(0b111u, read_u3);
+ EXPECT_EQ(0b1111010100u, read_u10);
+ EXPECT_EQ(0b1010101010101010101u, read_u19);
+
+ uint32_t read_all = tst.alias_all;
+
+ // Ensure aliasing is working.
+ EXPECT_EQ(0b10101010101010101011111010100111u, read_all);
+
+ // Ensure the bit pattern is correct.
+ EXPECT_EQ(0b10101010101010101011111010100111u, AsUint(tst));
+}
+
+} // namespace art
diff --git a/runtime/base/bit_utils.h b/runtime/base/bit_utils.h
index 87dac02..da3c704 100644
--- a/runtime/base/bit_utils.h
+++ b/runtime/base/bit_utils.h
@@ -371,6 +371,128 @@
return opnd;
}
+// Create a mask for the least significant "bits"
+// The returned value is always unsigned to prevent undefined behavior for bitwise ops.
+//
+// Given 'bits',
+// Returns:
+// <--- bits --->
+// +-----------------+------------+
+// | 0 ............0 | 1.....1 |
+// +-----------------+------------+
+// msb lsb
+template <typename T = size_t>
+inline static constexpr std::make_unsigned_t<T> MaskLeastSignificant(size_t bits) {
+ DCHECK_GE(BitSizeOf<T>(), bits) << "Bits out of range for type T";
+ using unsigned_T = std::make_unsigned_t<T>;
+ if (bits >= BitSizeOf<T>()) {
+ return std::numeric_limits<unsigned_T>::max();
+ } else {
+ // Shift in the unsigned target type to avoid undefined behavior when T is wider than int.
+ return static_cast<unsigned_T>((static_cast<unsigned_T>(1) << bits) - 1u);
+ }
+}
+
+// Clears the bitfield starting at the least significant bit "lsb" with a bitwidth of 'width'.
+// (Equivalent of ARM BFC instruction).
+//
+// Given:
+// <-- width -->
+// +--------+------------+--------+
+// | ABC... | bitfield | XYZ... +
+// +--------+------------+--------+
+// lsb 0
+// Returns:
+// <-- width -->
+// +--------+------------+--------+
+// | ABC... | 0........0 | XYZ... +
+// +--------+------------+--------+
+// lsb 0
+template <typename T>
+inline static constexpr T BitFieldClear(T value, size_t lsb, size_t width) {
+ DCHECK_GE(BitSizeOf(value), lsb + width) << "Bit field out of range for value";
+ const auto val = static_cast<std::make_unsigned_t<T>>(value);
+ const auto mask = MaskLeastSignificant<T>(width);
+
+ return static_cast<T>(val & ~(mask << lsb));
+}
+
+// Inserts the contents of 'data' into bitfield of 'value' starting
+// at the least significant bit "lsb" with a bitwidth of 'width'.
+// Note: data must be within range of [MinInt(width), MaxInt(width)].
+// (Equivalent of ARM BFI instruction).
+//
+// Given (data):
+// <-- width -->
+// +--------+------------+--------+
+// | ABC... | bitfield | XYZ... +
+// +--------+------------+--------+
+// lsb 0
+// Returns:
+// <-- width -->
+// +--------+------------+--------+
+// | ABC... | 0...data | XYZ... +
+// +--------+------------+--------+
+// lsb 0
+
+template <typename T, typename T2>
+inline static constexpr T BitFieldInsert(T value, T2 data, size_t lsb, size_t width) {
+ DCHECK_GE(BitSizeOf(value), lsb + width) << "Bit field out of range for value";
+ if (width != 0u) {
+ DCHECK_GE(MaxInt<T2>(width), data) << "Data out of range [too large] for bitwidth";
+ DCHECK_LE(MinInt<T2>(width), data) << "Data out of range [too small] for bitwidth";
+ } else {
+ DCHECK_EQ(static_cast<T2>(0), data) << "Data out of range [nonzero] for bitwidth 0";
+ }
+ const auto data_mask = MaskLeastSignificant<T2>(width);
+ const auto value_cleared = BitFieldClear(value, lsb, width);
+
+ return static_cast<T>(value_cleared | ((data & data_mask) << lsb));
+}
+
+// Extracts the bitfield starting at the least significant bit "lsb" with a bitwidth of 'width'.
+// Signed types are sign-extended during extraction. (Equivalent of ARM UBFX/SBFX instruction).
+//
+// Given:
+// <-- width -->
+// +--------+-------------+-------+
+// | | bitfield | +
+// +--------+-------------+-------+
+// lsb 0
+// (Unsigned) Returns:
+// <-- width -->
+// +----------------+-------------+
+// | 0... 0 | bitfield |
+// +----------------+-------------+
+// 0
+// (Signed) Returns:
+// <-- width -->
+// +----------------+-------------+
+// | S... S | bitfield |
+// +----------------+-------------+
+// 0
+// where S is the highest bit in 'bitfield'.
+template <typename T>
+inline static constexpr T BitFieldExtract(T value, size_t lsb, size_t width) {
+ DCHECK_GE(BitSizeOf(value), lsb + width) << "Bit field out of range for value";
+ const auto val = static_cast<std::make_unsigned_t<T>>(value);
+
+ const T bitfield_unsigned =
+ static_cast<T>((val >> lsb) & MaskLeastSignificant<T>(width));
+ if (std::is_signed<T>::value) {
+ // Perform sign extension
+ if (width == 0) { // Avoid underflow.
+ return static_cast<T>(0);
+ } else if (bitfield_unsigned & (static_cast<std::make_unsigned_t<T>>(1) << (width - 1))) { // Detect if sign bit was set.
+ // MSB <width> LSB
+ // 0b11111...100...000000
+ const auto ones_negmask = ~MaskLeastSignificant<T>(width);
+ return static_cast<T>(bitfield_unsigned | ones_negmask);
+ }
+ }
+ // Skip sign extension.
+ return bitfield_unsigned;
+}
+
} // namespace art
#endif // ART_RUNTIME_BASE_BIT_UTILS_H_
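A quick round-trip through the new helpers; since they are constexpr, the expected values can be pinned down with static_asserts (constants chosen arbitrarily):

```cpp
static_assert(MaskLeastSignificant<uint8_t>(3) == 0b00000111u, "mask of 3 bits");
static_assert(BitFieldClear(0b11011101u, /*lsb*/2, /*width*/3) == 0b11000001u,
              "bits [4:2] zeroed");
static_assert(BitFieldInsert(0b11000001u, /*data*/0b101u, /*lsb*/2, /*width*/3) == 0b11010101u,
              "0b101 written into bits [4:2]");
static_assert(BitFieldExtract(0b11010101u, /*lsb*/2, /*width*/3) == 0b101u,
              "extraction round-trips the inserted value");
```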
diff --git a/runtime/base/bit_utils_test.cc b/runtime/base/bit_utils_test.cc
index c96c6dc..0276d8d 100644
--- a/runtime/base/bit_utils_test.cc
+++ b/runtime/base/bit_utils_test.cc
@@ -345,6 +345,97 @@
"TestIsAbsoluteUint64#27");
static_assert(!IsAbsoluteUint<32, int64_t>(kUint32MaxPlus1), "TestIsAbsoluteUint64#28");
+static_assert(MaskLeastSignificant(0) == 0b0, "TestMaskLeastSignificant#1");
+static_assert(MaskLeastSignificant(1) == 0b1, "TestMaskLeastSignificant#2");
+static_assert(MaskLeastSignificant(2) == 0b11, "TestMaskLeastSignificant#3");
+static_assert(MaskLeastSignificant<uint8_t>(8) == 0xFF, "TestMaskLeastSignificant#4");
+static_assert(MaskLeastSignificant<int8_t>(8) == 0xFF, "TestMaskLeastSignificant#5");
+
+static_assert(BitFieldClear(0xFF, /*lsb*/0, /*width*/0) == 0xFF, "TestBitFieldClear#1");
+static_assert(BitFieldClear(std::numeric_limits<uint32_t>::max(), /*lsb*/0, /*width*/32) == 0x0,
+ "TestBitFieldClear#2");
+static_assert(BitFieldClear(std::numeric_limits<int32_t>::max(), /*lsb*/0, /*width*/32) == 0x0,
+ "TestBitFieldClear#3");
+static_assert(BitFieldClear(0xFF, /*lsb*/0, /*width*/2) == 0b11111100, "TestBitFieldClear#4");
+static_assert(BitFieldClear(0xFF, /*lsb*/0, /*width*/3) == 0b11111000, "TestBitFieldClear#5");
+static_assert(BitFieldClear(0xFF, /*lsb*/1, /*width*/3) == 0b11110001, "TestBitFieldClear#6");
+static_assert(BitFieldClear(0xFF, /*lsb*/2, /*width*/3) == 0b11100011, "TestBitFieldClear#7");
+
+static_assert(BitFieldExtract(0xFF, /*lsb*/0, /*width*/0) == 0x0, "TestBitFieldExtract#1");
+static_assert(BitFieldExtract(std::numeric_limits<uint32_t>::max(), /*lsb*/0, /*width*/32)
+ == std::numeric_limits<uint32_t>::max(),
+ "TestBitFieldExtract#2");
+static_assert(BitFieldExtract(std::numeric_limits<int32_t>::max(), /*lsb*/0, /*width*/32)
+ == std::numeric_limits<int32_t>::max(),
+ "TestBitFieldExtract#3");
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/0, /*width*/2) == 0b00000011,
+ "TestBitFieldExtract#4");
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/0, /*width*/3) == 0b00000111,
+ "TestBitFieldExtract#5");
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/1, /*width*/3) == 0b00000111,
+ "TestBitFieldExtract#6");
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/2, /*width*/3) == 0b00000111,
+ "TestBitFieldExtract#7");
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/3, /*width*/3) == 0b00000111,
+ "TestBitFieldExtract#8");
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/8, /*width*/3) == 0b00000000,
+ "TestBitFieldExtract#9");
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/7, /*width*/3) == 0b00000001,
+ "TestBitFieldExtract#10");
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/6, /*width*/3) == 0b00000011,
+ "TestBitFieldExtract#11");
+static_assert(BitFieldExtract(0xFF, /*lsb*/0, /*width*/2) == -1, "TestBitFieldExtract#12");
+static_assert(BitFieldExtract(0xFF, /*lsb*/0, /*width*/3) == -1, "TestBitFieldExtract#13");
+static_assert(BitFieldExtract(0xFF, /*lsb*/1, /*width*/3) == -1, "TestBitFieldExtract#14");
+static_assert(BitFieldExtract(0xFF, /*lsb*/2, /*width*/3) == -1, "TestBitFieldExtract#15");
+static_assert(BitFieldExtract(0xFF, /*lsb*/3, /*width*/3) == -1, "TestBitFieldExtract#16");
+static_assert(BitFieldExtract(0xFF, /*lsb*/8, /*width*/3) == 0b00000000, "TestBitFieldExtract#17");
+static_assert(BitFieldExtract(0xFF, /*lsb*/7, /*width*/3) == 0b00000001, "TestBitFieldExtract#18");
+static_assert(BitFieldExtract(0xFF, /*lsb*/6, /*width*/3) == 0b00000011, "TestBitFieldExtract#19");
+static_assert(BitFieldExtract(static_cast<uint8_t>(0b01101010), /*lsb*/2, /*width*/4)
+ == 0b00001010,
+ "TestBitFieldExtract#20");
+static_assert(BitFieldExtract(static_cast<int8_t>(0b01101010), /*lsb*/2, /*width*/4)
+ == static_cast<int8_t>(0b11111010),
+ "TestBitFieldExtract#21");
+
+static_assert(BitFieldInsert(0xFF, /*data*/0x0, /*lsb*/0, /*width*/0) == 0xFF,
+ "TestBitFieldInsert#1");
+static_assert(BitFieldInsert(std::numeric_limits<uint32_t>::max(),
+ /*data*/std::numeric_limits<uint32_t>::max(),
+ /*lsb*/0,
+ /*width*/32)
+ == std::numeric_limits<uint32_t>::max(),
+ "TestBitFieldInsert#2");
+static_assert(BitFieldInsert(std::numeric_limits<int32_t>::max(),
+ /*data*/std::numeric_limits<uint32_t>::max(),
+ /*lsb*/0,
+ /*width*/32)
+ == std::numeric_limits<uint32_t>::max(),
+ "TestBitFieldInsert#3");
+static_assert(BitFieldInsert(0u,
+ /*data*/std::numeric_limits<uint32_t>::max(),
+ /*lsb*/0,
+ /*width*/32)
+ == std::numeric_limits<uint32_t>::max(),
+ "TestBitFieldInsert#4");
+static_assert(BitFieldInsert(-(-0),
+ /*data*/std::numeric_limits<uint32_t>::max(),
+ /*lsb*/0,
+ /*width*/32)
+ == std::numeric_limits<uint32_t>::max(),
+ "TestBitFieldInsert#5");
+static_assert(BitFieldInsert(0x00, /*data*/0b11u, /*lsb*/0, /*width*/2) == 0b00000011,
+ "TestBitFieldInsert#6");
+static_assert(BitFieldInsert(0x00, /*data*/0b111u, /*lsb*/0, /*width*/3) == 0b00000111,
+ "TestBitFieldInsert#7");
+static_assert(BitFieldInsert(0x00, /*data*/0b111u, /*lsb*/1, /*width*/3) == 0b00001110,
+ "TestBitFieldInsert#8");
+static_assert(BitFieldInsert(0x00, /*data*/0b111u, /*lsb*/2, /*width*/3) == 0b00011100,
+ "TestBitFieldInsert#9");
+static_assert(BitFieldInsert(0b01011100, /*data*/0b1101u, /*lsb*/4, /*width*/4) == 0b11011100,
+ "TestBitFieldInsert#10");
+
template <typename Container>
void CheckElements(const std::initializer_list<uint32_t>& expected, const Container& elements) {
auto expected_it = expected.begin();
diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h
index c472a9e..bc25b36 100644
--- a/runtime/base/hash_set.h
+++ b/runtime/base/hash_set.h
@@ -22,6 +22,7 @@
#include <functional>
#include <iterator>
#include <memory>
+#include <type_traits>
#include <utility>
#include "bit_utils.h"
@@ -385,18 +386,20 @@
}
// Insert an element, allows duplicates.
- void Insert(const T& element) {
- InsertWithHash(element, hashfn_(element));
+ template <typename U, typename = typename std::enable_if<std::is_convertible<U, T>::value>::type>
+ void Insert(U&& element) {
+ InsertWithHash(std::forward<U>(element), hashfn_(element));
}
- void InsertWithHash(const T& element, size_t hash) {
+ template <typename U, typename = typename std::enable_if<std::is_convertible<U, T>::value>::type>
+ void InsertWithHash(U&& element, size_t hash) {
DCHECK_EQ(hash, hashfn_(element));
if (num_elements_ >= elements_until_expand_) {
Expand();
DCHECK_LT(num_elements_, elements_until_expand_);
}
const size_t index = FirstAvailableSlot(IndexForHash(hash));
- data_[index] = element;
+ data_[index] = std::forward<U>(element);
++num_elements_;
}
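The practical effect of the forwarding overloads: an rvalue is moved into the table instead of copied. Sketch (the EmptyFn here is illustrative, not part of the change):

```cpp
// An empty-slot functor for std::string, following the EmptyFn contract.
struct EmptyStringFn {
  void MakeEmpty(std::string& slot) const { slot.clear(); }
  bool IsEmpty(const std::string& slot) const { return slot.empty(); }
};

void InsertExamples() {
  HashSet<std::string, EmptyStringFn> set;
  std::string element = "a string long enough to make the copy visible";
  set.Insert(element);             // U deduced as std::string& -> copies into the slot
  set.Insert(std::move(element));  // U deduced as std::string  -> moves into the slot
}
```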
diff --git a/runtime/base/scoped_arena_allocator.h b/runtime/base/scoped_arena_allocator.h
index 1a0eb5e..f156f52 100644
--- a/runtime/base/scoped_arena_allocator.h
+++ b/runtime/base/scoped_arena_allocator.h
@@ -145,6 +145,10 @@
explicit ScopedArenaAllocator(ArenaStack* arena_stack);
~ScopedArenaAllocator();
+ ArenaStack* GetArenaStack() const {
+ return arena_stack_;
+ }
+
void Reset();
void* Alloc(size_t bytes, ArenaAllocKind kind = kArenaAllocMisc) ALWAYS_INLINE {
diff --git a/runtime/base/scoped_arena_containers.h b/runtime/base/scoped_arena_containers.h
index 4a6c907..fccaaea 100644
--- a/runtime/base/scoped_arena_containers.h
+++ b/runtime/base/scoped_arena_containers.h
@@ -52,17 +52,40 @@
using ScopedArenaVector = dchecked_vector<T, ScopedArenaAllocatorAdapter<T>>;
template <typename T, typename Comparator = std::less<T>>
+using ScopedArenaPriorityQueue = std::priority_queue<T, ScopedArenaVector<T>, Comparator>;
+
+template <typename T>
+using ScopedArenaStdStack = std::stack<T, ScopedArenaDeque<T>>;
+
+template <typename T, typename Comparator = std::less<T>>
using ScopedArenaSet = std::set<T, Comparator, ScopedArenaAllocatorAdapter<T>>;
template <typename K, typename V, typename Comparator = std::less<K>>
using ScopedArenaSafeMap =
SafeMap<K, V, Comparator, ScopedArenaAllocatorAdapter<std::pair<const K, V>>>;
+template <typename T,
+ typename EmptyFn = DefaultEmptyFn<T>,
+ typename HashFn = std::hash<T>,
+ typename Pred = std::equal_to<T>>
+using ScopedArenaHashSet = HashSet<T, EmptyFn, HashFn, Pred, ScopedArenaAllocatorAdapter<T>>;
+
+template <typename Key,
+ typename Value,
+ typename EmptyFn = DefaultEmptyFn<std::pair<Key, Value>>,
+ typename HashFn = std::hash<Key>,
+ typename Pred = std::equal_to<Key>>
+using ScopedArenaHashMap = HashMap<Key,
+ Value,
+ EmptyFn,
+ HashFn,
+ Pred,
+ ScopedArenaAllocatorAdapter<std::pair<Key, Value>>>;
+
template <typename K, typename V, class Hash = std::hash<K>, class KeyEqual = std::equal_to<K>>
using ScopedArenaUnorderedMap =
std::unordered_map<K, V, Hash, KeyEqual, ScopedArenaAllocatorAdapter<std::pair<const K, V>>>;
-
// Implementation details below.
template <>
@@ -79,12 +102,12 @@
typedef ScopedArenaAllocatorAdapter<U> other;
};
- explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* arena_allocator,
+ explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* allocator,
ArenaAllocKind kind = kArenaAllocSTL)
- : DebugStackReference(arena_allocator),
- DebugStackIndirectTopRef(arena_allocator),
+ : DebugStackReference(allocator),
+ DebugStackIndirectTopRef(allocator),
ArenaAllocatorAdapterKind(kind),
- arena_stack_(arena_allocator->arena_stack_) {
+ arena_stack_(allocator->arena_stack_) {
}
template <typename U>
ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other) // NOLINT, implicit
@@ -122,12 +145,12 @@
typedef ScopedArenaAllocatorAdapter<U> other;
};
- explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* arena_allocator,
+ explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* allocator,
ArenaAllocKind kind = kArenaAllocSTL)
- : DebugStackReference(arena_allocator),
- DebugStackIndirectTopRef(arena_allocator),
+ : DebugStackReference(allocator),
+ DebugStackIndirectTopRef(allocator),
ArenaAllocatorAdapterKind(kind),
- arena_stack_(arena_allocator->arena_stack_) {
+ arena_stack_(allocator->arena_stack_) {
}
template <typename U>
ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other) // NOLINT, implicit
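Sketch of the new scoped-arena aliases in use (the allocation kind is arbitrary here):

```cpp
void UseScopedArenaContainers(ArenaPool* pool) {
  ArenaStack arena_stack(pool);
  ScopedArenaAllocator allocator(&arena_stack);

  // std::stack backed by a ScopedArenaDeque.
  ScopedArenaStdStack<int> work_stack(allocator.Adapter(kArenaAllocSTL));
  work_stack.push(1);

  // std::priority_queue backed by a ScopedArenaVector.
  ScopedArenaPriorityQueue<int> queue(
      std::less<int>(), ScopedArenaVector<int>(allocator.Adapter(kArenaAllocSTL)));
  queue.push(2);
}  // All container memory is reclaimed when the allocator goes out of scope.
```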
diff --git a/runtime/base/variant_map.h b/runtime/base/variant_map.h
index d87df87..71a1018 100644
--- a/runtime/base/variant_map.h
+++ b/runtime/base/variant_map.h
@@ -237,6 +237,14 @@
return (ptr == nullptr) ? key.CreateDefaultValue() : *ptr;
}
+ template <typename T, typename U>
+ void AssignIfExists(const TKey<T>& key, U* out) {
+ DCHECK(out != nullptr);
+ if (Exists(key)) {
+ *out = std::move(*Get(key));
+ }
+ }
+
private:
// TODO: move to detail, or make it more generic like a ScopeGuard(function)
template <typename TValue>
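
The added AssignIfExists() is meant for pulling a value out of the map only when the key was actually set, so a caller-side default survives otherwise. A short hypothetical sketch ('map' and 'SomeKey' are invented for illustration; SomeKey would be one of the map's TKey<int> keys):

    int value = 42;                       // caller-side default
    map.AssignIfExists(SomeKey, &value);  // overwritten only if SomeKey exists;
                                          // note the stored value is moved out
    // 'value' is now either the stored setting or still 42.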
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index 5601317..f9603a7 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -76,20 +76,20 @@
ProfileCompilationInfo::ProfileCompilationInfo(ArenaPool* custom_arena_pool)
: default_arena_pool_(),
- arena_(custom_arena_pool),
- info_(arena_.Adapter(kArenaAllocProfile)),
- profile_key_map_(std::less<const std::string>(), arena_.Adapter(kArenaAllocProfile)) {
+ allocator_(custom_arena_pool),
+ info_(allocator_.Adapter(kArenaAllocProfile)),
+ profile_key_map_(std::less<const std::string>(), allocator_.Adapter(kArenaAllocProfile)) {
}
ProfileCompilationInfo::ProfileCompilationInfo()
: default_arena_pool_(/*use_malloc*/true, /*low_4gb*/false, "ProfileCompilationInfo"),
- arena_(&default_arena_pool_),
- info_(arena_.Adapter(kArenaAllocProfile)),
- profile_key_map_(std::less<const std::string>(), arena_.Adapter(kArenaAllocProfile)) {
+ allocator_(&default_arena_pool_),
+ info_(allocator_.Adapter(kArenaAllocProfile)),
+ profile_key_map_(std::less<const std::string>(), allocator_.Adapter(kArenaAllocProfile)) {
}
ProfileCompilationInfo::~ProfileCompilationInfo() {
- VLOG(profiler) << Dumpable<MemStats>(arena_.GetMemStats());
+ VLOG(profiler) << Dumpable<MemStats>(allocator_.GetMemStats());
for (DexFileData* data : info_) {
delete data;
}
@@ -569,8 +569,8 @@
uint8_t profile_index = profile_index_it->second;
if (info_.size() <= profile_index) {
// This is a new addition. Add it to the info_ array.
- DexFileData* dex_file_data = new (&arena_) DexFileData(
- &arena_,
+ DexFileData* dex_file_data = new (&allocator_) DexFileData(
+ &allocator_,
profile_key,
checksum,
profile_index,
@@ -1871,7 +1871,7 @@
ProfileCompilationInfo::DexPcData*
ProfileCompilationInfo::FindOrAddDexPc(InlineCacheMap* inline_cache, uint32_t dex_pc) {
- return &(inline_cache->FindOrAdd(dex_pc, DexPcData(&arena_))->second);
+ return &(inline_cache->FindOrAdd(dex_pc, DexPcData(&allocator_))->second);
}
std::unordered_set<std::string> ProfileCompilationInfo::GetClassDescriptors(
diff --git a/runtime/jit/profile_compilation_info.h b/runtime/jit/profile_compilation_info.h
index 09de29e..8889b34 100644
--- a/runtime/jit/profile_compilation_info.h
+++ b/runtime/jit/profile_compilation_info.h
@@ -133,10 +133,10 @@
// megamorphic and its possible types).
// If the receiver is megamorphic or is missing types the set of classes will be empty.
struct DexPcData : public ArenaObject<kArenaAllocProfile> {
- explicit DexPcData(ArenaAllocator* arena)
+ explicit DexPcData(ArenaAllocator* allocator)
: is_missing_types(false),
is_megamorphic(false),
- classes(std::less<ClassReference>(), arena->Adapter(kArenaAllocProfile)) {}
+ classes(std::less<ClassReference>(), allocator->Adapter(kArenaAllocProfile)) {}
void AddClass(uint16_t dex_profile_idx, const dex::TypeIndex& type_idx);
void SetIsMegamorphic() {
if (is_missing_types) return;
@@ -405,7 +405,7 @@
static bool Equals(const ProfileCompilationInfo::OfflineProfileMethodInfo& pmi1,
const ProfileCompilationInfo::OfflineProfileMethodInfo& pmi2);
- ArenaAllocator* GetArena() { return &arena_; }
+ ArenaAllocator* GetAllocator() { return &allocator_; }
// Return all of the class descriptors in the profile for a set of dex files.
std::unordered_set<std::string> GetClassDescriptors(const std::vector<const DexFile*>& dex_files);
@@ -429,19 +429,19 @@
// profile_key_map_ and info_. However, it makes the profiles logic much
// simpler if we have references here as well.
struct DexFileData : public DeletableArenaObject<kArenaAllocProfile> {
- DexFileData(ArenaAllocator* arena,
+ DexFileData(ArenaAllocator* allocator,
const std::string& key,
uint32_t location_checksum,
uint16_t index,
uint32_t num_methods)
- : arena_(arena),
+ : arena_(allocator),
profile_key(key),
profile_index(index),
checksum(location_checksum),
- method_map(std::less<uint16_t>(), arena->Adapter(kArenaAllocProfile)),
- class_set(std::less<dex::TypeIndex>(), arena->Adapter(kArenaAllocProfile)),
+ method_map(std::less<uint16_t>(), allocator->Adapter(kArenaAllocProfile)),
+ class_set(std::less<dex::TypeIndex>(), allocator->Adapter(kArenaAllocProfile)),
num_method_ids(num_methods),
- bitmap_storage(arena->Adapter(kArenaAllocProfile)) {
+ bitmap_storage(allocator->Adapter(kArenaAllocProfile)) {
const size_t num_bits = num_method_ids * kBitmapIndexCount;
bitmap_storage.resize(RoundUp(num_bits, kBitsPerByte) / kBitsPerByte);
if (!bitmap_storage.empty()) {
@@ -698,7 +698,7 @@
friend class Dex2oatLayoutTest;
ArenaPool default_arena_pool_;
- ArenaAllocator arena_;
+ ArenaAllocator allocator_;
// Vector containing the actual profile info.
// The vector index is the profile index of the dex data and
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 3357fa7..70dd5cb 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -95,10 +95,10 @@
}
static void VMDebug_startMethodTracingFd(JNIEnv* env, jclass, jstring javaTraceFilename,
- jobject javaFd, jint bufferSize, jint flags,
+ jint javaFd, jint bufferSize, jint flags,
jboolean samplingEnabled, jint intervalUs,
jboolean streamingOutput) {
- int originalFd = jniGetFDFromFileDescriptor(env, javaFd);
+ int originalFd = javaFd;
if (originalFd < 0) {
return;
}
@@ -224,9 +224,9 @@
* Cause "hprof" data to be dumped. We can throw an IOException if an
* error occurs during file handling.
*/
-static void VMDebug_dumpHprofData(JNIEnv* env, jclass, jstring javaFilename, jobject javaFd) {
+static void VMDebug_dumpHprofData(JNIEnv* env, jclass, jstring javaFilename, jint javaFd) {
-  // Only one of these may be null.
+  // At least one of these must be valid: a non-null filename or a non-negative fd.
- if (javaFilename == nullptr && javaFd == nullptr) {
+ if (javaFilename == nullptr && javaFd < 0) {
ScopedObjectAccess soa(env);
ThrowNullPointerException("fileName == null && fd == null");
return;
@@ -243,15 +243,7 @@
filename = "[fd]";
}
- int fd = -1;
- if (javaFd != nullptr) {
- fd = jniGetFDFromFileDescriptor(env, javaFd);
- if (fd < 0) {
- ScopedObjectAccess soa(env);
- ThrowRuntimeException("Invalid file descriptor");
- return;
- }
- }
+ int fd = javaFd;
hprof::DumpHeap(filename.c_str(), fd, false);
}
@@ -537,7 +529,7 @@
NATIVE_METHOD(VMDebug, countInstancesOfClass, "(Ljava/lang/Class;Z)J"),
NATIVE_METHOD(VMDebug, countInstancesOfClasses, "([Ljava/lang/Class;Z)[J"),
NATIVE_METHOD(VMDebug, crash, "()V"),
- NATIVE_METHOD(VMDebug, dumpHprofData, "(Ljava/lang/String;Ljava/io/FileDescriptor;)V"),
+ NATIVE_METHOD(VMDebug, dumpHprofData, "(Ljava/lang/String;I)V"),
NATIVE_METHOD(VMDebug, dumpHprofDataDdms, "()V"),
NATIVE_METHOD(VMDebug, dumpReferenceTables, "()V"),
NATIVE_METHOD(VMDebug, getAllocCount, "(I)I"),
@@ -557,7 +549,7 @@
NATIVE_METHOD(VMDebug, startEmulatorTracing, "()V"),
NATIVE_METHOD(VMDebug, startInstructionCounting, "()V"),
NATIVE_METHOD(VMDebug, startMethodTracingDdmsImpl, "(IIZI)V"),
- NATIVE_METHOD(VMDebug, startMethodTracingFd, "(Ljava/lang/String;Ljava/io/FileDescriptor;IIZIZ)V"),
+ NATIVE_METHOD(VMDebug, startMethodTracingFd, "(Ljava/lang/String;IIIZIZ)V"),
NATIVE_METHOD(VMDebug, startMethodTracingFilename, "(Ljava/lang/String;IIZI)V"),
NATIVE_METHOD(VMDebug, stopAllocCounting, "()V"),
NATIVE_METHOD(VMDebug, stopEmulatorTracing, "()V"),
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 8afbe78..57ab56c 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -221,7 +221,7 @@
return IsConstructor() && !IsStatic();
}
- ScopedArenaAllocator& GetArena() {
+ ScopedArenaAllocator& GetScopedAllocator() {
return arena_;
}
diff --git a/runtime/verifier/reg_type-inl.h b/runtime/verifier/reg_type-inl.h
index 704d2a8..631c6bd 100644
--- a/runtime/verifier/reg_type-inl.h
+++ b/runtime/verifier/reg_type-inl.h
@@ -199,8 +199,8 @@
return instance_;
}
-inline void* RegType::operator new(size_t size, ScopedArenaAllocator* arena) {
- return arena->Alloc(size, kArenaAllocMisc);
+inline void* RegType::operator new(size_t size, ScopedArenaAllocator* allocator) {
+ return allocator->Alloc(size, kArenaAllocMisc);
}
} // namespace verifier
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index c5d8ff5..a2085a3 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -264,8 +264,8 @@
return ::operator new(size);
}
- static void* operator new(size_t size, ArenaAllocator* arena) = delete;
- static void* operator new(size_t size, ScopedArenaAllocator* arena);
+ static void* operator new(size_t size, ArenaAllocator* allocator) = delete;
+ static void* operator new(size_t size, ScopedArenaAllocator* allocator);
enum class AssignmentType {
kBoolean,
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
index 3da1680..a9c9428 100644
--- a/runtime/verifier/register_line-inl.h
+++ b/runtime/verifier/register_line-inl.h
@@ -178,14 +178,15 @@
}
inline RegisterLine* RegisterLine::Create(size_t num_regs, MethodVerifier* verifier) {
- void* memory = verifier->GetArena().Alloc(ComputeSize(num_regs));
+ void* memory = verifier->GetScopedAllocator().Alloc(ComputeSize(num_regs));
return new (memory) RegisterLine(num_regs, verifier);
}
inline RegisterLine::RegisterLine(size_t num_regs, MethodVerifier* verifier)
: num_regs_(num_regs),
- monitors_(verifier->GetArena().Adapter(kArenaAllocVerifier)),
- reg_to_lock_depths_(std::less<uint32_t>(), verifier->GetArena().Adapter(kArenaAllocVerifier)),
+ monitors_(verifier->GetScopedAllocator().Adapter(kArenaAllocVerifier)),
+ reg_to_lock_depths_(std::less<uint32_t>(),
+ verifier->GetScopedAllocator().Adapter(kArenaAllocVerifier)),
this_initialized_(false) {
std::uninitialized_fill_n(line_, num_regs_, 0u);
SetResultTypeToUnknown(verifier);
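
A note on the Create()/placement-new pattern above: RegisterLine stores its registers in a trailing array (line_), so it must be sized with ComputeSize() and constructed into raw arena memory rather than via a plain operator new. A generic sketch of that idiom (the type below is illustrative, not ART's):

    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include <new>

    struct Trailing {
      size_t num;
      uint16_t regs[0];  // flexible trailing array (GNU extension, as in ART)
      static size_t ComputeSize(size_t n) {
        return sizeof(Trailing) + n * sizeof(uint16_t);
      }
    };

    Trailing* Create(void* memory, size_t n) {  // 'memory' comes from an arena Alloc()
      Trailing* t = new (memory) Trailing;
      t->num = n;
      std::uninitialized_fill_n(t->regs, n, 0u);  // mirrors the fill in RegisterLine
      return t;
    }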
diff --git a/test/1937-transform-soft-fail/check b/test/1937-transform-soft-fail/check
new file mode 100755
index 0000000..7cee530
--- /dev/null
+++ b/test/1937-transform-soft-fail/check
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
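+# Strip everything from the first ':' onward on each line of the actual output
+# so that only the exception type is compared; the exception message can vary
+# between configurations.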
+sed -e 's/:.*$//' "$2" > "$2.tmp"
+
+./default-check "$1" "$2.tmp"
diff --git a/test/1937-transform-soft-fail/expected.txt b/test/1937-transform-soft-fail/expected.txt
new file mode 100644
index 0000000..f0f6ac8
--- /dev/null
+++ b/test/1937-transform-soft-fail/expected.txt
@@ -0,0 +1,3 @@
+hello
+throwing
+Caught exception java.lang.NoSuchMethodError
diff --git a/test/1937-transform-soft-fail/info.txt b/test/1937-transform-soft-fail/info.txt
new file mode 100644
index 0000000..875a5f6
--- /dev/null
+++ b/test/1937-transform-soft-fail/info.txt
@@ -0,0 +1 @@
+Tests that redefining a class to contain a soft verification failure (a call to a missing method) throws the error when the bad code runs, rather than failing the redefinition.
diff --git a/test/1937-transform-soft-fail/run b/test/1937-transform-soft-fail/run
new file mode 100755
index 0000000..c6e62ae
--- /dev/null
+++ b/test/1937-transform-soft-fail/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/1937-transform-soft-fail/src/Main.java b/test/1937-transform-soft-fail/src/Main.java
new file mode 100644
index 0000000..e3541b3
--- /dev/null
+++ b/test/1937-transform-soft-fail/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ art.Test1937.run();
+ }
+}
diff --git a/test/1937-transform-soft-fail/src/art/Redefinition.java b/test/1937-transform-soft-fail/src/art/Redefinition.java
new file mode 100644
index 0000000..56d2938
--- /dev/null
+++ b/test/1937-transform-soft-fail/src/art/Redefinition.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+// Common Redefinition functions. Placed here for use by CTS.
+public class Redefinition {
+ public static final class CommonClassDefinition {
+ public final Class<?> target;
+ public final byte[] class_file_bytes;
+ public final byte[] dex_file_bytes;
+
+ public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+ this.target = target;
+ this.class_file_bytes = class_file_bytes;
+ this.dex_file_bytes = dex_file_bytes;
+ }
+ }
+
+  // A set of possible test configurations. Tests should set this if they need to.
+ // This must be kept in sync with the defines in ti-agent/common_helper.cc
+ public static enum Config {
+ COMMON_REDEFINE(0),
+ COMMON_RETRANSFORM(1),
+ COMMON_TRANSFORM(2);
+
+ private final int val;
+ private Config(int val) {
+ this.val = val;
+ }
+ }
+
+ public static void setTestConfiguration(Config type) {
+ nativeSetTestConfiguration(type.val);
+ }
+
+ private static native void nativeSetTestConfiguration(int type);
+
+  // Transforms (redefines) the given class using the provided class file and dex file bytes.
+ public static native void doCommonClassRedefinition(Class<?> target,
+ byte[] classfile,
+ byte[] dexfile);
+
+ public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
+ ArrayList<Class<?>> classes = new ArrayList<>();
+ ArrayList<byte[]> class_files = new ArrayList<>();
+ ArrayList<byte[]> dex_files = new ArrayList<>();
+
+ for (CommonClassDefinition d : defs) {
+ classes.add(d.target);
+ class_files.add(d.class_file_bytes);
+ dex_files.add(d.dex_file_bytes);
+ }
+ doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+ class_files.toArray(new byte[0][]),
+ dex_files.toArray(new byte[0][]));
+ }
+
+ public static void addMultiTransformationResults(CommonClassDefinition... defs) {
+ for (CommonClassDefinition d : defs) {
+ addCommonTransformationResult(d.target.getCanonicalName(),
+ d.class_file_bytes,
+ d.dex_file_bytes);
+ }
+ }
+
+ public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+ byte[][] classfiles,
+ byte[][] dexfiles);
+ public static native void doCommonClassRetransformation(Class<?>... target);
+ public static native void setPopRetransformations(boolean pop);
+ public static native void popTransformationFor(String name);
+ public static native void enableCommonRetransformation(boolean enable);
+ public static native void addCommonTransformationResult(String target_name,
+ byte[] class_bytes,
+ byte[] dex_bytes);
+}
diff --git a/test/1937-transform-soft-fail/src/art/Test1937.java b/test/1937-transform-soft-fail/src/art/Test1937.java
new file mode 100644
index 0000000..7255a5e
--- /dev/null
+++ b/test/1937-transform-soft-fail/src/art/Test1937.java
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+public class Test1937 {
+
+ static class Transform {
+ public void sayHi() {
+      // Use a lowercase 'h' to make sure the string will have a different string id
+      // than the one in the transformation (the transformation code is the same except
+      // for the actual printed String, which was making the test pass inaccurately
+      // in JIT mode when loading the string from the dex cache, as the string ids
+      // of the two different strings were the same).
+ // We know the string ids will be different because lexicographically:
+ // "Goodbye" < "LTransform;" < "hello".
+ System.out.println("hello");
+ }
+ }
+
+ /**
+ * base64 encoded class/dex file for
+ * class Transform {
+ * public void sayHi() {
+ * System.out.println("throwing");
+ * Redefinition.notPresent();
+ * }
+ * }
+ */
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAJQoABwAPCQAQABEIABIKABMAFAoAFQAWBwAYBwAbAQAGPGluaXQ+AQADKClWAQAE" +
+ "Q29kZQEAD0xpbmVOdW1iZXJUYWJsZQEABXNheUhpAQAKU291cmNlRmlsZQEADVRlc3QxOTM3Lmph" +
+ "dmEMAAgACQcAHAwAHQAeAQAIdGhyb3dpbmcHAB8MACAAIQcAIgwAIwAJBwAkAQAWYXJ0L1Rlc3Qx" +
+ "OTM3JFRyYW5zZm9ybQEACVRyYW5zZm9ybQEADElubmVyQ2xhc3NlcwEAEGphdmEvbGFuZy9PYmpl" +
+ "Y3QBABBqYXZhL2xhbmcvU3lzdGVtAQADb3V0AQAVTGphdmEvaW8vUHJpbnRTdHJlYW07AQATamF2" +
+ "YS9pby9QcmludFN0cmVhbQEAB3ByaW50bG4BABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBABBhcnQv" +
+ "UmVkZWZpbml0aW9uAQAKbm90UHJlc2VudAEADGFydC9UZXN0MTkzNwAgAAYABwAAAAAAAgAAAAgA" +
+ "CQABAAoAAAAdAAEAAQAAAAUqtwABsQAAAAEACwAAAAYAAQAAACMAAQAMAAkAAQAKAAAALAACAAEA" +
+ "AAAMsgACEgO2AAS4AAWxAAAAAQALAAAADgADAAAAJQAIACYACwAnAAIADQAAAAIADgAaAAAACgAB" +
+ "AAYAFwAZAAg=");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQDfmxvwUHv7EEBCvzjdM/uAviWG8eIsKIbsAwAAcAAAAHhWNBIAAAAAAAAAACgDAAAW" +
+ "AAAAcAAAAAoAAADIAAAAAgAAAPAAAAABAAAACAEAAAUAAAAQAQAAAQAAADgBAACUAgAAWAEAALoB" +
+ "AADCAQAA1gEAAPABAAAAAgAAJAIAAEQCAABbAgAAbwIAAIMCAACXAgAApgIAALECAAC0AgAAuAIA" +
+ "AMUCAADLAgAA1wIAANwCAADlAgAA7AIAAPYCAAABAAAAAgAAAAMAAAAEAAAABQAAAAYAAAAHAAAA" +
+ "CAAAAAkAAAAMAAAADAAAAAkAAAAAAAAADQAAAAkAAAC0AQAACAAFABEAAAAAAAAAEAAAAAEAAAAA" +
+ "AAAAAQAAABMAAAAFAAEAEgAAAAYAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAoAAACkAQAAGAMAAAAA" +
+ "AAACAAAACQMAAA8DAAABAAEAAQAAAP0CAAAEAAAAcBAEAAAADgADAAEAAgAAAAIDAAALAAAAYgAA" +
+ "ABoBFABuIAMAEABxAAAAAAAOAAAAWAEAAAAAAAAAAAAAAAAAAAEAAAAHAAY8aW5pdD4AEkxhcnQv" +
+ "UmVkZWZpbml0aW9uOwAYTGFydC9UZXN0MTkzNyRUcmFuc2Zvcm07AA5MYXJ0L1Rlc3QxOTM3OwAi" +
+ "TGRhbHZpay9hbm5vdGF0aW9uL0VuY2xvc2luZ0NsYXNzOwAeTGRhbHZpay9hbm5vdGF0aW9uL0lu" +
+ "bmVyQ2xhc3M7ABVMamF2YS9pby9QcmludFN0cmVhbTsAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGph" +
+ "dmEvbGFuZy9TdHJpbmc7ABJMamF2YS9sYW5nL1N5c3RlbTsADVRlc3QxOTM3LmphdmEACVRyYW5z" +
+ "Zm9ybQABVgACVkwAC2FjY2Vzc0ZsYWdzAARuYW1lAApub3RQcmVzZW50AANvdXQAB3ByaW50bG4A" +
+ "BXNheUhpAAh0aHJvd2luZwAFdmFsdWUAIwAHDgAlAAcOeDwAAgMBFRgCAgQCDgQIDxcLAAABAQGA" +
+ "gATkAgIB/AIAABAAAAAAAAAAAQAAAAAAAAABAAAAFgAAAHAAAAACAAAACgAAAMgAAAADAAAAAgAA" +
+ "APAAAAAEAAAAAQAAAAgBAAAFAAAABQAAABABAAAGAAAAAQAAADgBAAADEAAAAQAAAFgBAAABIAAA" +
+ "AgAAAGQBAAAGIAAAAQAAAKQBAAABEAAAAQAAALQBAAACIAAAFgAAALoBAAADIAAAAgAAAP0CAAAE" +
+ "IAAAAgAAAAkDAAAAIAAAAQAAABgDAAAAEAAAAQAAACgDAAA=");
+
+ public static void run() {
+ Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+ doTest(new Transform());
+ }
+
+ public static void doTest(Transform t) {
+ t.sayHi();
+ Redefinition.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+ try {
+ t.sayHi();
+ } catch (Throwable e) {
+ System.out.println("Caught exception " + e.getClass().getName() + ": " + e.getMessage());
+ }
+ }
+}
diff --git a/test/1938-transform-abstract-single-impl/expected.txt b/test/1938-transform-abstract-single-impl/expected.txt
new file mode 100644
index 0000000..6a06f9b
--- /dev/null
+++ b/test/1938-transform-abstract-single-impl/expected.txt
@@ -0,0 +1,4 @@
+JNI_OnLoad called
+Running sayHi() - hello
+redefining TransformAbstract
+Running sayHi() - Goodbye
diff --git a/test/1938-transform-abstract-single-impl/info.txt b/test/1938-transform-abstract-single-impl/info.txt
new file mode 100644
index 0000000..5df8306
--- /dev/null
+++ b/test/1938-transform-abstract-single-impl/info.txt
@@ -0,0 +1,2 @@
+Tests that single-implementation abstract methods don't crash the runtime when
+their declaring class is redefined.
diff --git a/test/1938-transform-abstract-single-impl/run b/test/1938-transform-abstract-single-impl/run
new file mode 100755
index 0000000..adb1a1c
--- /dev/null
+++ b/test/1938-transform-abstract-single-impl/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --no-app-image
diff --git a/test/1938-transform-abstract-single-impl/src/Main.java b/test/1938-transform-abstract-single-impl/src/Main.java
new file mode 100644
index 0000000..7ac2172
--- /dev/null
+++ b/test/1938-transform-abstract-single-impl/src/Main.java
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import art.Redefinition;
+import java.util.Base64;
+public class Main {
+ static abstract class TransformAbstract {
+ public abstract void doSayHi();
+
+ public void sayHi() {
+ System.out.println("hello");
+ }
+ }
+
+ static final class TransformConcrete extends TransformAbstract {
+ public final void doSayHi() {
+ System.out.print("Running sayHi() - ");
+ sayHi();
+ }
+ }
+
+ public static native void ensureJitCompiled(Class k, String m);
+
+ /**
+ * base64 encoded class/dex file for
+ * static abstract class TransformAbstract {
+ * public abstract void doSayHi();
+ * public void sayHi() {
+ * System.out.println("Goodbye");
+ * }
+ * }
+ */
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAIQoABgAPCQAQABEIABIKABMAFAcAFgcAGQEABjxpbml0PgEAAygpVgEABENvZGUB" +
+ "AA9MaW5lTnVtYmVyVGFibGUBAAdkb1NheUhpAQAFc2F5SGkBAApTb3VyY2VGaWxlAQAJTWFpbi5q" +
+ "YXZhDAAHAAgHABoMABsAHAEAB0dvb2RieWUHAB0MAB4AHwcAIAEAFk1haW4kVHJhbnNmb3JtQWJz" +
+ "dHJhY3QBABFUcmFuc2Zvcm1BYnN0cmFjdAEADElubmVyQ2xhc3NlcwEAEGphdmEvbGFuZy9PYmpl" +
+ "Y3QBABBqYXZhL2xhbmcvU3lzdGVtAQADb3V0AQAVTGphdmEvaW8vUHJpbnRTdHJlYW07AQATamF2" +
+ "YS9pby9QcmludFN0cmVhbQEAB3ByaW50bG4BABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBAARNYWlu" +
+ "BCAABQAGAAAAAAADAAAABwAIAAEACQAAAB0AAQABAAAABSq3AAGxAAAAAQAKAAAABgABAAAAHAQB" +
+ "AAsACAAAAAEADAAIAAEACQAAACUAAgABAAAACbIAAhIDtgAEsQAAAAEACgAAAAoAAgAAAB8ACAAg" +
+ "AAIADQAAAAIADgAYAAAACgABAAUAFQAXBAg=");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQCQkoTiKzIz0l96rtsnUxdY4Kwx+YINWFHEAwAAcAAAAHhWNBIAAAAAAAAAAAADAAAV" +
+ "AAAAcAAAAAkAAADEAAAAAgAAAOgAAAABAAAAAAEAAAUAAAAIAQAAAQAAADABAAB0AgAAUAEAAKoB" +
+ "AACyAQAAuwEAANUBAADdAQAAAQIAACECAAA4AgAATAIAAGACAAB0AgAAfwIAAJICAACVAgAAmQIA" +
+ "AKYCAACvAgAAtQIAALoCAADDAgAAygIAAAIAAAADAAAABAAAAAUAAAAGAAAABwAAAAgAAAAJAAAA" +
+ "DAAAAAwAAAAIAAAAAAAAAA0AAAAIAAAApAEAAAcABAARAAAAAAAAAAAAAAAAAAAADwAAAAAAAAAT" +
+ "AAAABAABABIAAAAFAAAAAAAAAAAAAAAABAAABQAAAAAAAAAKAAAAlAEAAOwCAAAAAAAAAgAAANwC" +
+ "AADiAgAAAQABAAEAAADRAgAABAAAAHAQBAAAAA4AAwABAAIAAADWAgAACAAAAGIAAAAaAQEAbiAD" +
+ "ABAADgBQAQAAAAAAAAAAAAAAAAAAAQAAAAYABjxpbml0PgAHR29vZGJ5ZQAYTE1haW4kVHJhbnNm" +
+ "b3JtQWJzdHJhY3Q7AAZMTWFpbjsAIkxkYWx2aWsvYW5ub3RhdGlvbi9FbmNsb3NpbmdDbGFzczsA" +
+ "HkxkYWx2aWsvYW5ub3RhdGlvbi9Jbm5lckNsYXNzOwAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABJM" +
+ "amF2YS9sYW5nL09iamVjdDsAEkxqYXZhL2xhbmcvU3RyaW5nOwASTGphdmEvbGFuZy9TeXN0ZW07" +
+ "AAlNYWluLmphdmEAEVRyYW5zZm9ybUFic3RyYWN0AAFWAAJWTAALYWNjZXNzRmxhZ3MAB2RvU2F5" +
+ "SGkABG5hbWUAA291dAAHcHJpbnRsbgAFc2F5SGkABXZhbHVlABwABw4AHwAHDngAAgIBFBgBAgMC" +
+ "DiQIBBAXCwAAAQIAgIAE3AIBgQgAAQH0AgAAEAAAAAAAAAABAAAAAAAAAAEAAAAVAAAAcAAAAAIA" +
+ "AAAJAAAAxAAAAAMAAAACAAAA6AAAAAQAAAABAAAAAAEAAAUAAAAFAAAACAEAAAYAAAABAAAAMAEA" +
+ "AAMQAAABAAAAUAEAAAEgAAACAAAAXAEAAAYgAAABAAAAlAEAAAEQAAABAAAApAEAAAIgAAAVAAAA" +
+ "qgEAAAMgAAACAAAA0QIAAAQgAAACAAAA3AIAAAAgAAABAAAA7AIAAAAQAAABAAAAAAMAAA==");
+
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
+ Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+
+ ensureJitCompiled(TransformAbstract.class, "sayHi");
+ ensureJitCompiled(TransformConcrete.class, "doSayHi");
+
+ TransformAbstract t1 = new TransformConcrete();
+ t1.doSayHi();
+
+ assertSingleImplementation(TransformAbstract.class, "doSayHi", true);
+
+ System.out.println("redefining TransformAbstract");
+ Redefinition.doCommonClassRedefinition(TransformAbstract.class, CLASS_BYTES, DEX_BYTES);
+
+ t1.doSayHi();
+ }
+
+ private static native boolean hasSingleImplementation(Class<?> clazz, String method_name);
+ private static void assertSingleImplementation(Class<?> clazz, String method_name, boolean b) {
+ if (hasSingleImplementation(clazz, method_name) != b) {
+      System.out.println(clazz + "." + method_name +
+                         " does not have the expected single-implementation value of " + b);
+ }
+ }
+}
diff --git a/test/1938-transform-abstract-single-impl/src/art/Redefinition.java b/test/1938-transform-abstract-single-impl/src/art/Redefinition.java
new file mode 100644
index 0000000..56d2938
--- /dev/null
+++ b/test/1938-transform-abstract-single-impl/src/art/Redefinition.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+// Common Redefinition functions. Placed here for use by CTS.
+public class Redefinition {
+ public static final class CommonClassDefinition {
+ public final Class<?> target;
+ public final byte[] class_file_bytes;
+ public final byte[] dex_file_bytes;
+
+ public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+ this.target = target;
+ this.class_file_bytes = class_file_bytes;
+ this.dex_file_bytes = dex_file_bytes;
+ }
+ }
+
+  // A set of possible test configurations. Tests should set this if they need to.
+ // This must be kept in sync with the defines in ti-agent/common_helper.cc
+ public static enum Config {
+ COMMON_REDEFINE(0),
+ COMMON_RETRANSFORM(1),
+ COMMON_TRANSFORM(2);
+
+ private final int val;
+ private Config(int val) {
+ this.val = val;
+ }
+ }
+
+ public static void setTestConfiguration(Config type) {
+ nativeSetTestConfiguration(type.val);
+ }
+
+ private static native void nativeSetTestConfiguration(int type);
+
+  // Transforms (redefines) the given class using the provided class file and dex file bytes.
+ public static native void doCommonClassRedefinition(Class<?> target,
+ byte[] classfile,
+ byte[] dexfile);
+
+ public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
+ ArrayList<Class<?>> classes = new ArrayList<>();
+ ArrayList<byte[]> class_files = new ArrayList<>();
+ ArrayList<byte[]> dex_files = new ArrayList<>();
+
+ for (CommonClassDefinition d : defs) {
+ classes.add(d.target);
+ class_files.add(d.class_file_bytes);
+ dex_files.add(d.dex_file_bytes);
+ }
+ doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+ class_files.toArray(new byte[0][]),
+ dex_files.toArray(new byte[0][]));
+ }
+
+ public static void addMultiTransformationResults(CommonClassDefinition... defs) {
+ for (CommonClassDefinition d : defs) {
+ addCommonTransformationResult(d.target.getCanonicalName(),
+ d.class_file_bytes,
+ d.dex_file_bytes);
+ }
+ }
+
+ public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+ byte[][] classfiles,
+ byte[][] dexfiles);
+ public static native void doCommonClassRetransformation(Class<?>... target);
+ public static native void setPopRetransformations(boolean pop);
+ public static native void popTransformationFor(String name);
+ public static native void enableCommonRetransformation(boolean enable);
+ public static native void addCommonTransformationResult(String target_name,
+ byte[] class_bytes,
+ byte[] dex_bytes);
+}
diff --git a/test/597-deopt-invoke-stub/run b/test/597-deopt-invoke-stub/run
index bc04498..53b7c4c 100644
--- a/test/597-deopt-invoke-stub/run
+++ b/test/597-deopt-invoke-stub/run
@@ -14,5 +14,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# We want to run in debuggable mode and compiled.
-exec ${RUN} --jit -Xcompiler-option --debuggable "${@}"
+# In order to test deoptimizing at the quick-to-interpreter bridge,
+# we want to run in debuggable mode with jit compilation.
+# We also bump the jit threshold up to 10000 to make sure that the method
+# that should be interpreted is not compiled.
+exec ${RUN} --jit --runtime-option -Xjitthreshold:10000 -Xcompiler-option --debuggable "${@}"
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 229d618..68e9eb8 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -237,7 +237,7 @@
},
{
"tests": "597-deopt-invoke-stub",
- "variant": "interp-ac | interpreter | optimizing | trace | stream",
+ "variant": "speed-profile | interp-ac | interpreter | optimizing | trace | stream",
"description": ["This test expects JIT compilation and no AOT for",
"testing deoptimizing at quick-to-interpreter bridge."]
},
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 4f99ac3..ab604b2 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -70,6 +70,7 @@
make_command="make $j_arg $showcommands build-art-host-tests $common_targets dx-tests"
make_command+=" ${out_dir}/host/linux-x86/lib/libjavacoretests.so "
make_command+=" ${out_dir}/host/linux-x86/lib64/libjavacoretests.so"
+ make_command+=" libwrapagentpropertiesd libwrapagentproperties"
elif [[ $mode == "target" ]]; then
make_command="make $j_arg $showcommands build-art-target-tests $common_targets"
make_command+=" libjavacrypto libjavacoretests libnetd_client linker toybox toolbox sh"
diff --git a/tools/libjdwp_art_failures.txt b/tools/libjdwp_art_failures.txt
index 6b5daec..57d3ce7 100644
--- a/tools/libjdwp_art_failures.txt
+++ b/tools/libjdwp_art_failures.txt
@@ -71,6 +71,33 @@
"org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testMethodExit",
"org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testMethodExitWithReturnValue" ]
},
+/* TODO Investigate these failures more closely */
+{
+ description: "Tests that fail when run on the chromium buildbots against the prebuilt libjdwp.so in certain configurations",
+ result: EXEC_FAILED,
+ bug: 67497270,
+ names: [
+ "org.apache.harmony.jpda.tests.jdwp.Events.CombinedEvents003Test#testCombinedEvents003_01",
+ "org.apache.harmony.jpda.tests.jdwp.Events.CombinedEventsTest#testCombinedEvents_01",
+ "org.apache.harmony.jpda.tests.jdwp.Events.CombinedEventsTest#testCombinedEvents_02",
+ "org.apache.harmony.jpda.tests.jdwp.Events.CombinedEventsTest#testCombinedEvents_03",
+ "org.apache.harmony.jpda.tests.jdwp.Events.CombinedEventsTest#testCombinedEvents_04",
+ "org.apache.harmony.jpda.tests.jdwp.Events.CombinedEventsTest#testCombinedEvents_06",
+ "org.apache.harmony.jpda.tests.jdwp.Events.VMDeathTest#testVMDeathEvent",
+ "org.apache.harmony.jpda.tests.jdwp.MultiSession.ClassPrepareTest#testClassPrepare001",
+ "org.apache.harmony.jpda.tests.jdwp.MultiSession.ExceptionTest#testException001",
+ "org.apache.harmony.jpda.tests.jdwp.MultiSession.FieldAccessTest#testFieldAccess001",
+ "org.apache.harmony.jpda.tests.jdwp.MultiSession.FieldModificationTest#testFieldModification001",
+ "org.apache.harmony.jpda.tests.jdwp.MultiSession.SingleStepTest#testSingleStep001",
+ "org.apache.harmony.jpda.tests.jdwp.MultiSession.VMDeathTest#testVMDeathRequest",
+ "org.apache.harmony.jpda.tests.jdwp.ReferenceType.SignatureWithGenericTest#testSignatureWithGeneric001",
+ "org.apache.harmony.jpda.tests.jdwp.StackFrame.GetValues002Test#testGetValues005_Int2",
+ "org.apache.harmony.jpda.tests.jdwp.VirtualMachine.SetDefaultStratumTest#testSetDefaultStratum001",
+ "org.apache.harmony.jpda.tests.jdwp.ThreadReference.StatusTest#testStatus001",
+ "org.apache.harmony.jpda.tests.jdwp.VirtualMachine.AllClassesTest#testAllClasses002",
+ "org.apache.harmony.jpda.tests.jdwp.VirtualMachine.AllClassesWithGenericTest#testAllClassesWithGeneric001"
+ ]
+},
/* TODO Categorize these failures more. */
{
description: "Tests that fail on both ART and RI. These tests are likely incorrect",