Merge "Revert "Revert "Re-enable boot image string sharpening."""
diff --git a/Android.bp b/Android.bp
index 835048d..77b9ac3 100644
--- a/Android.bp
+++ b/Android.bp
@@ -1,6 +1,34 @@
+// TODO: These should be handled with transitive static library dependencies
+art_static_dependencies = [
+ // Note: the order is important because of static linking resolution.
+ "libziparchive",
+ "libnativehelper",
+ "libnativebridge",
+ "libnativeloader",
+ "libsigchain_dummy",
+ "liblog",
+ "libz",
+ "libbacktrace",
+ "libcutils",
+ "libunwindbacktrace",
+ "libutils",
+ "libbase",
+ "liblz4",
+ "liblzma",
+]
+
subdirs = [
+ "benchmark",
"build",
"compiler",
+ "dalvikvm",
+ "dexdump",
+ "dexlayout",
+ "dexlist",
+ "disassembler",
+ "oatdump",
"runtime",
"sigchainlib",
+ "tools/cpp-define-generator",
+ "tools/dmtracedump",
]
diff --git a/Android.mk b/Android.mk
index 9d0062b..0ed5d87 100644
--- a/Android.mk
+++ b/Android.mk
@@ -76,19 +76,13 @@
########################################################################
# product rules
-include $(art_path)/dexdump/Android.mk
-include $(art_path)/dexlist/Android.mk
include $(art_path)/dex2oat/Android.mk
-include $(art_path)/disassembler/Android.mk
-include $(art_path)/oatdump/Android.mk
include $(art_path)/imgdiag/Android.mk
include $(art_path)/patchoat/Android.mk
include $(art_path)/profman/Android.mk
-include $(art_path)/dalvikvm/Android.mk
include $(art_path)/tools/Android.mk
include $(art_path)/tools/ahat/Android.mk
include $(art_path)/tools/dexfuzz/Android.mk
-include $(art_path)/tools/dmtracedump/Android.mk
include $(art_path)/libart_fake/Android.mk
@@ -113,7 +107,6 @@
include $(art_path)/build/Android.common_test.mk
include $(art_path)/build/Android.gtest.mk
include $(art_path)/test/Android.run-test.mk
-include $(art_path)/benchmark/Android.mk
TEST_ART_ADB_ROOT_AND_REMOUNT := \
(adb root && \
diff --git a/benchmark/Android.bp b/benchmark/Android.bp
new file mode 100644
index 0000000..617f5b0
--- /dev/null
+++ b/benchmark/Android.bp
@@ -0,0 +1,43 @@
+//
+// Copyright (C) 2015 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+art_cc_library {
+ name: "libartbenchmark",
+ host_supported: true,
+ defaults: ["art_defaults", "art_debug_defaults"],
+ srcs: [
+ "jobject-benchmark/jobject_benchmark.cc",
+ "jni-perf/perf_jni.cc",
+ "scoped-primitive-array/scoped_primitive_array.cc",
+ ],
+ shared_libs: [
+ "libart",
+ "libbacktrace",
+ "libnativehelper",
+ ],
+ clang: true,
+ target: {
+ android: {
+ shared_libs: ["libdl"],
+ },
+ host: {
+ host_ldlibs: ["-ldl", "-lpthread"],
+ },
+ },
+ cflags: [
+ "-Wno-frame-larger-than=",
+ ],
+}
diff --git a/benchmark/Android.mk b/benchmark/Android.mk
deleted file mode 100644
index 2360bcc..0000000
--- a/benchmark/Android.mk
+++ /dev/null
@@ -1,79 +0,0 @@
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-LOCAL_PATH := $(call my-dir)
-
-include art/build/Android.common_build.mk
-
-LIBARTBENCHMARK_COMMON_SRC_FILES := \
- jobject-benchmark/jobject_benchmark.cc \
- jni-perf/perf_jni.cc \
- scoped-primitive-array/scoped_primitive_array.cc
-
-# $(1): target or host
-define build-libartbenchmark
- ifneq ($(1),target)
- ifneq ($(1),host)
- $$(error expected target or host for argument 1, received $(1))
- endif
- endif
-
- art_target_or_host := $(1)
-
- include $(CLEAR_VARS)
- LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
- LOCAL_MODULE := libartbenchmark
- ifeq ($$(art_target_or_host),target)
- LOCAL_MODULE_TAGS := tests
- endif
- LOCAL_SRC_FILES := $(LIBARTBENCHMARK_COMMON_SRC_FILES)
- LOCAL_SHARED_LIBRARIES += libart libbacktrace libnativehelper
- LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime
- LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
- LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
- ifeq ($$(art_target_or_host),target)
- LOCAL_CLANG := $(ART_TARGET_CLANG)
- $(call set-target-local-cflags-vars,debug)
- LOCAL_SHARED_LIBRARIES += libdl
- LOCAL_MULTILIB := both
- # LOCAL_MODULE_PATH_32 := $(ART_TARGET_OUT)/$(ART_TARGET_ARCH_32)
- # LOCAL_MODULE_PATH_64 := $(ART_TARGET_OUT)/$(ART_TARGET_ARCH_64)
- LOCAL_MODULE_TARGET_ARCH := $(ART_SUPPORTED_ARCH)
- include $(BUILD_SHARED_LIBRARY)
- else # host
- LOCAL_CLANG := $(ART_HOST_CLANG)
- LOCAL_CFLAGS := $(ART_HOST_CFLAGS) $(ART_HOST_DEBUG_CFLAGS)
- LOCAL_ASFLAGS := $(ART_HOST_ASFLAGS) $(ART_HOST_DEBUG_ASFLAGS)
- LOCAL_LDLIBS := -ldl -lpthread
- LOCAL_IS_HOST_MODULE := true
- LOCAL_MULTILIB := both
- include $(BUILD_HOST_SHARED_LIBRARY)
- endif
-
- # Clear locally used variables.
- art_target_or_host :=
-endef
-
-ifeq ($(ART_BUILD_TARGET),true)
- $(eval $(call build-libartbenchmark,target))
-endif
-ifeq ($(ART_BUILD_HOST),true)
- $(eval $(call build-libartbenchmark,host))
-endif
-
-# Clear locally used variables.
-LOCAL_PATH :=
-LIBARTBENCHMARK_COMMON_SRC_FILES :=
diff --git a/build/Android.bp b/build/Android.bp
index be7dafd..630cf3c 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -101,6 +101,9 @@
// Apple, it's a pain.
"-Wmissing-noreturn",
],
+ host_ldlibs: [
+ "-lrt",
+ ],
},
host: {
cflags: [
@@ -108,6 +111,10 @@
// clang/libunwind bugs that cause SEGVs in run-test-004-ThreadStress.
"-fno-omit-frame-pointer",
],
+ host_ldlibs: [
+ "-ldl",
+ "-lpthread",
+ ],
},
},
@@ -150,9 +157,19 @@
"-DDYNAMIC_ANNOTATIONS_ENABLED=1",
"-DVIXL_DEBUG",
"-UNDEBUG",
- "-Wno-frame-larger-than=",
],
asflags: [
"-UNDEBUG",
],
+ target: {
+ // This has to be duplicated for android and host to make sure it
+ // comes after the -Wframe-larger-than warnings inserted by art.go's
+ // target-specific properties.
+ android: {
+ cflags: ["-Wno-frame-larger-than="],
+ },
+ host: {
+ cflags: ["-Wno-frame-larger-than="],
+ },
+ },
}
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index b5d41d9..04a0344 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -238,6 +238,12 @@
art_cflags += -DART_USE_TLAB=1
endif
+# Are additional statically-linked ART host binaries (dex2oats,
+# oatdumps, etc.) getting built?
+ifeq ($(ART_BUILD_HOST_STATIC),true)
+ art_cflags += -DART_BUILD_HOST_STATIC=1
+endif
+
# Cflags for non-debug ART and ART tools.
art_non_debug_cflags := \
$(ART_NDEBUG_OPT_FLAG)
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index 86bb475..e88d027 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -18,6 +18,7 @@
ART_ANDROID_COMMON_PATH_MK := true
include art/build/Android.common.mk
+include art/build/Android.common_build.mk
# Directory used for dalvik-cache on device.
ART_TARGET_DALVIK_CACHE_DIR := /data/dalvik-cache
@@ -113,4 +114,29 @@
ART_TARGET_SHARED_LIBRARY_DEPENDENCIES += $(foreach lib,$(ART_CORE_SHARED_LIBRARIES), $(2ND_TARGET_OUT_SHARED_LIBRARIES)/$(lib).so)
endif
+ART_CORE_EXECUTABLES := \
+ dex2oat \
+ imgdiag \
+ oatdump \
+ patchoat \
+ profman \
+
+# Depend on the -target or -host phony targets generated by the build system
+# for each module
+ART_TARGET_EXECUTABLES := dalvikvm-target
+ifneq ($(ART_BUILD_TARGET_NDEBUG),false)
+ART_TARGET_EXECUTABLES += $(foreach name,$(ART_CORE_EXECUTABLES),$(name)-target)
+endif
+ifneq ($(ART_BUILD_TARGET_DEBUG),false)
+ART_TARGET_EXECUTABLES += $(foreach name,$(ART_CORE_EXECUTABLES),$(name)d-target)
+endif
+
+ART_HOST_EXECUTABLES := dalvikvm-host
+ifneq ($(ART_BUILD_HOST_NDEBUG),false)
+ART_HOST_EXECUTABLES += $(foreach name,$(ART_CORE_EXECUTABLES),$(name)-host)
+endif
+ifneq ($(ART_BUILD_HOST_DEBUG),false)
+ART_HOST_EXECUTABLES += $(foreach name,$(ART_CORE_EXECUTABLES),$(name)d-host)
+endif
+
endif # ART_ANDROID_COMMON_PATH_MK
diff --git a/build/Android.executable.mk b/build/Android.executable.mk
index 2db16af..c35833d 100644
--- a/build/Android.executable.mk
+++ b/build/Android.executable.mk
@@ -16,12 +16,9 @@
include art/build/Android.common_build.mk
-ART_HOST_EXECUTABLES ?=
-ART_TARGET_EXECUTABLES ?=
-
ART_EXECUTABLES_CFLAGS :=
-# $(1): executable ("d" will be appended for debug version)
+# $(1): executable ("d" will be appended for debug version, "s" will be appended for static version)
# $(2): source
# $(3): extra shared libraries
# $(4): extra include directories
@@ -70,7 +67,7 @@
endif
ifeq ($$(art_static_or_shared),static)
- LOCAL_MODULE := $(LOCAL_MODULE)s
+ LOCAL_MODULE := $$(LOCAL_MODULE)s
endif
LOCAL_CFLAGS := $(ART_EXECUTABLES_CFLAGS)
@@ -169,11 +166,9 @@
ifeq ($$(art_target_or_host),target)
include $(BUILD_EXECUTABLE)
- ART_TARGET_EXECUTABLES := $(ART_TARGET_EXECUTABLES) $$(foreach name,$$(art_out_binary_name),$(TARGET_OUT_EXECUTABLES)/$$(name))
else # host
LOCAL_IS_HOST_MODULE := true
include $(BUILD_HOST_EXECUTABLE)
- ART_HOST_EXECUTABLES := $(ART_HOST_EXECUTABLES) $$(foreach name,$$(art_out_binary_name),$(HOST_OUT_EXECUTABLES)/$$(name))
endif
# Clear out local variables now that we're done with them.
@@ -254,3 +249,20 @@
)
)
endef
+
+# Note: the order is important because of static linking resolution.
+ART_STATIC_DEPENDENCIES := \
+ libziparchive \
+ libnativehelper \
+ libnativebridge \
+ libnativeloader \
+ libsigchain_dummy \
+ liblog \
+ libz \
+ libbacktrace \
+ libcutils \
+ libunwindbacktrace \
+ libutils \
+ libbase \
+ liblz4 \
+ liblzma
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 9ec072f..c09241a 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -126,6 +126,19 @@
$(TARGET_CORE_IMAGE_default_no-pic_32) \
dexdump2
+# The dexlayout test requires an image and the dexlayout utility.
+# TODO: rename into dexdump when migration completes
+ART_GTEST_dexlayout_test_HOST_DEPS := \
+ $(HOST_CORE_IMAGE_default_no-pic_64) \
+ $(HOST_CORE_IMAGE_default_no-pic_32) \
+ $(HOST_OUT_EXECUTABLES)/dexlayout \
+ $(HOST_OUT_EXECUTABLES)/dexdump2
+ART_GTEST_dexlayout_test_TARGET_DEPS := \
+ $(TARGET_CORE_IMAGE_default_no-pic_64) \
+ $(TARGET_CORE_IMAGE_default_no-pic_32) \
+ dexlayout \
+ dexdump2
+
# The dexlist test requires an image and the dexlist utility.
ART_GTEST_dexlist_test_HOST_DEPS := \
$(HOST_CORE_IMAGE_default_no-pic_64) \
@@ -170,6 +183,7 @@
RUNTIME_GTEST_COMMON_SRC_FILES := \
cmdline/cmdline_parser_test.cc \
dexdump/dexdump_test.cc \
+ dexlayout/dexlayout_test.cc \
dexlist/dexlist_test.cc \
dex2oat/dex2oat_test.cc \
imgdiag/imgdiag_test.cc \
@@ -521,6 +535,7 @@
(adb shell "$(GCOV_ENV) LD_LIBRARY_PATH=$(3) ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) \
valgrind --leak-check=full --error-exitcode=1 --workaround-gcc296-bugs=yes \
--suppressions=$(ART_TARGET_TEST_DIR)/valgrind-target-suppressions.txt \
+ --num-callers=50 \
$(ART_TARGET_NATIVETEST_DIR)/$(TARGET_$(2)ARCH)/$(1) && touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID" \
&& (adb pull $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID /tmp/ \
&& $$(call ART_TEST_PASSED,$$@)) \
@@ -577,7 +592,8 @@
$(hide) $$(call ART_TEST_SKIP,$$@) && \
VALGRIND_LIB=$(HOST_OUT)/lib64/valgrind \
$(HOST_OUT_EXECUTABLES)/valgrind --leak-check=full --error-exitcode=1 \
- --suppressions=art/test/valgrind-suppressions.txt $$< && \
+ --suppressions=art/test/valgrind-suppressions.txt --num-callers=50 \
+ $$< && \
$$(call ART_TEST_PASSED,$$@) || $$(call ART_TEST_FAILED,$$@)
ART_TEST_HOST_VALGRIND_GTEST$$($(2)ART_PHONY_TEST_HOST_SUFFIX)_RULES += valgrind-$$(gtest_rule)
diff --git a/build/art.go b/build/art.go
index 4e64dcf..9cab3b9 100644
--- a/build/art.go
+++ b/build/art.go
@@ -67,6 +67,12 @@
cflags = append(cflags, "-fstack-protector")
}
+ // Are additional statically-linked ART host binaries
+ // (dex2oats, oatdumps, etc.) getting built?
+ if envTrue(ctx, "ART_BUILD_HOST_STATIC") {
+ cflags = append(cflags, "-DART_BUILD_HOST_STATIC=1")
+ }
+
return cflags, asflags
}
@@ -135,8 +141,42 @@
type artGlobalDefaults struct{}
+func (a *artCustomLinkerCustomizer) CustomizeProperties(ctx android.CustomizePropertiesContext) {
+ linker := envDefault(ctx, "CUSTOM_TARGET_LINKER", "")
+ if linker != "" {
+ type props struct {
+ DynamicLinker string
+ }
+
+ p := &props{}
+ p.DynamicLinker = linker
+ ctx.AppendProperties(p)
+ }
+}
+
+type artCustomLinkerCustomizer struct{}
+
+func (a *artPrefer32BitCustomizer) CustomizeProperties(ctx android.CustomizePropertiesContext) {
+ if envTrue(ctx, "HOST_PREFER_32_BIT") {
+ type props struct {
+ Target struct {
+ Host struct {
+ Compile_multilib string
+ }
+ }
+ }
+
+ p := &props{}
+ p.Target.Host.Compile_multilib = "prefer32"
+ ctx.AppendProperties(p)
+ }
+}
+
+type artPrefer32BitCustomizer struct{}
+
func init() {
soong.RegisterModuleType("art_cc_library", artLibrary)
+ soong.RegisterModuleType("art_cc_binary", artBinary)
soong.RegisterModuleType("art_cc_defaults", artDefaultsFactory)
soong.RegisterModuleType("art_global_defaults", artGlobalDefaultsFactory)
}
@@ -164,6 +204,16 @@
c := &codegenCustomizer{}
android.AddCustomizer(library, c)
props = append(props, &c.codegenProperties)
+
+ return module, props
+}
+
+func artBinary() (blueprint.Module, []interface{}) {
+ binary, _ := cc.NewBinary(android.HostAndDeviceSupported)
+ module, props := binary.Init()
+
+ android.AddCustomizer(binary, &artCustomLinkerCustomizer{})
+ android.AddCustomizer(binary, &artPrefer32BitCustomizer{})
return module, props
}
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 289adf8..595a824 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -188,6 +188,7 @@
"liblzma",
],
include_dirs: ["art/disassembler"],
+ export_include_dirs: ["."],
}
gensrcs {
@@ -245,8 +246,8 @@
art_cc_library {
name: "libartd-compiler",
defaults: [
- "libart-compiler-defaults",
"art_debug_defaults",
+ "libart-compiler-defaults",
],
codegen: {
arm: {
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index c942375..5239121 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -47,12 +47,12 @@
// Create an OatMethod based on pointers (for unit tests).
OatFile::OatMethod CreateOatMethod(const void* code);
- void MakeExecutable(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
+ void MakeExecutable(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
static void MakeExecutable(const void* code_start, size_t code_length);
void MakeExecutable(mirror::ClassLoader* class_loader, const char* class_name)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
protected:
virtual void SetUp();
@@ -81,17 +81,17 @@
virtual void TearDown();
void CompileClass(mirror::ClassLoader* class_loader, const char* class_name)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- void CompileMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
+ void CompileMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
void CompileDirectMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
const char* method_name, const char* signature)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CompileVirtualMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
const char* method_name, const char* signature)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CreateCompilerDriver(Compiler::Kind kind, InstructionSet isa, size_t number_of_threads = 2U);
diff --git a/compiler/compiler.h b/compiler/compiler.h
index a955f3c..9e5fb83 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -69,12 +69,12 @@
jit::JitCodeCache* code_cache ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
bool osr ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return false;
}
virtual uintptr_t GetEntryPointOf(ArtMethod* method) const
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
uint64_t GetMaximumCompilationTimeBeforeWarning() const {
return maximum_compilation_time_before_warning_;
diff --git a/compiler/debug/elf_debug_info_writer.h b/compiler/debug/elf_debug_info_writer.h
index e8e278d..0a4f094 100644
--- a/compiler/debug/elf_debug_info_writer.h
+++ b/compiler/debug/elf_debug_info_writer.h
@@ -275,7 +275,7 @@
owner_->builder_->GetDebugInfo()->WriteFully(buffer.data(), buffer.size());
}
- void Write(const ArrayRef<mirror::Class*>& types) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Write(const ArrayRef<mirror::Class*>& types) REQUIRES_SHARED(Locks::mutator_lock_) {
using namespace dwarf; // NOLINT. For easy access to DWARF constants.
info_.StartTag(DW_TAG_compile_unit);
@@ -466,7 +466,7 @@
// Linkage name uniquely identifies type.
// It is used to determine the dynamic type of objects.
// We use the methods_ field of class since it is unique and it is not moved by the GC.
- void WriteLinkageName(mirror::Class* type) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void WriteLinkageName(mirror::Class* type) REQUIRES_SHARED(Locks::mutator_lock_) {
auto* methods_ptr = type->GetMethodsPtr();
if (methods_ptr == nullptr) {
// Some types might have no methods. Allocate empty array instead.
diff --git a/compiler/debug/elf_debug_writer.cc b/compiler/debug/elf_debug_writer.cc
index b7e000a..5bfdd16 100644
--- a/compiler/debug/elf_debug_writer.cc
+++ b/compiler/debug/elf_debug_writer.cc
@@ -145,7 +145,7 @@
InstructionSet isa,
const InstructionSetFeatures* features,
const ArrayRef<mirror::Class*>& types)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::vector<uint8_t> buffer;
buffer.reserve(KB);
VectorOutputStream out("Debug ELF file", &buffer);
diff --git a/compiler/debug/elf_debug_writer.h b/compiler/debug/elf_debug_writer.h
index 6f52249..b0542c7 100644
--- a/compiler/debug/elf_debug_writer.h
+++ b/compiler/debug/elf_debug_writer.h
@@ -56,7 +56,7 @@
InstructionSet isa,
const InstructionSetFeatures* features,
const ArrayRef<mirror::Class*>& types)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
std::vector<MethodDebugInfo> MakeTrampolineInfos(const OatHeader& oat_header);
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index dbdfa24..43fc687 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -64,7 +64,7 @@
* @return true if the method is a candidate for inlining, false otherwise.
*/
bool AnalyseMethodCode(verifier::MethodVerifier* verifier)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
/**
* Check whether a particular method index corresponds to an intrinsic or special function.
diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h
index 4f5ea76..1f69686 100644
--- a/compiler/dex/quick_compiler_callbacks.h
+++ b/compiler/dex/quick_compiler_callbacks.h
@@ -38,7 +38,7 @@
~QuickCompilerCallbacks() { }
void MethodVerified(verifier::MethodVerifier* verifier)
- SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
void ClassRejected(ClassReference ref) OVERRIDE;
diff --git a/compiler/dex/verification_results.h b/compiler/dex/verification_results.h
index 1af11a8..6afd1ab 100644
--- a/compiler/dex/verification_results.h
+++ b/compiler/dex/verification_results.h
@@ -43,7 +43,7 @@
~VerificationResults();
void ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!verified_methods_lock_);
const VerifiedMethod* GetVerifiedMethod(MethodReference ref)
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index 495acf0..04331e5 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -44,7 +44,7 @@
typedef SafeMap<uint32_t, DexFileReference> DequickenMap;
static const VerifiedMethod* Create(verifier::MethodVerifier* method_verifier, bool compile)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
~VerifiedMethod() = default;
const DevirtualizationMap& GetDevirtMap() const {
@@ -100,15 +100,15 @@
// Generate devirtualizaion map into devirt_map_.
void GenerateDevirtMap(verifier::MethodVerifier* method_verifier)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Generate dequickening map into dequicken_map_. Returns false if there is an error.
bool GenerateDequickenMap(verifier::MethodVerifier* method_verifier)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Generate safe case set into safe_cast_set_.
void GenerateSafeCastSet(verifier::MethodVerifier* method_verifier)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
DevirtualizationMap devirt_map_;
// Dequicken map is required for compiling quickened byte codes. The quicken maps from
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 77ec4b7..b5bc2fb 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -492,7 +492,7 @@
static optimizer::DexToDexCompilationLevel GetDexToDexCompilationLevel(
Thread* self, const CompilerDriver& driver, Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file, const DexFile::ClassDef& class_def)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
auto* const runtime = Runtime::Current();
if (runtime->UseJitCompilation() || driver.GetCompilerOptions().VerifyAtRuntime()) {
// Verify at runtime shouldn't dex to dex since we didn't resolve of verify.
@@ -1026,7 +1026,7 @@
std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve)
: exceptions_to_resolve_(exceptions_to_resolve) {}
- virtual bool operator()(mirror::Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ virtual bool operator()(mirror::Class* c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
for (auto& m : c->GetMethods(pointer_size)) {
ResolveExceptionsForMethod(&m, pointer_size);
@@ -1036,7 +1036,7 @@
private:
void ResolveExceptionsForMethod(ArtMethod* method_handle, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = method_handle->GetCodeItem();
if (code_item == nullptr) {
return; // native or abstract method
@@ -1080,7 +1080,7 @@
explicit RecordImageClassesVisitor(std::unordered_set<std::string>* image_classes)
: image_classes_(image_classes) {}
- bool operator()(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool operator()(mirror::Class* klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
std::string temp;
image_classes_->insert(klass->GetDescriptor(&temp));
return true;
@@ -1161,7 +1161,7 @@
static void MaybeAddToImageClasses(Handle<mirror::Class> c,
std::unordered_set<std::string>* image_classes)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
StackHandleScope<1> hs(self);
// Make a copy of the handle so that we don't clobber it doing Assign.
@@ -1216,7 +1216,7 @@
// Visitor for VisitReferences.
void operator()(mirror::Object* object, MemberOffset field_offset, bool /* is_static */) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* ref = object->GetFieldObject<mirror::Object>(field_offset);
if (ref != nullptr) {
VisitClinitClassesObject(ref);
@@ -1232,7 +1232,7 @@
const {}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
- void Walk() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Walk() REQUIRES_SHARED(Locks::mutator_lock_) {
// Use the initial classes as roots for a search.
for (mirror::Class* klass_root : image_classes_) {
VisitClinitClassesObject(klass_root);
@@ -1244,7 +1244,7 @@
public:
explicit FindImageClassesVisitor(ClinitImageUpdate* data) : data_(data) {}
- bool operator()(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool operator()(mirror::Class* klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
std::string temp;
const char* name = klass->GetDescriptor(&temp);
if (data_->image_class_descriptors_->find(name) != data_->image_class_descriptors_->end()) {
@@ -1265,7 +1265,7 @@
ClinitImageUpdate(std::unordered_set<std::string>* image_class_descriptors, Thread* self,
ClassLinker* linker)
- SHARED_REQUIRES(Locks::mutator_lock_) :
+ REQUIRES_SHARED(Locks::mutator_lock_) :
image_class_descriptors_(image_class_descriptors), self_(self) {
CHECK(linker != nullptr);
CHECK(image_class_descriptors != nullptr);
@@ -1284,7 +1284,7 @@
}
void VisitClinitClassesObject(mirror::Object* object) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(object != nullptr);
if (marked_objects_.find(object) != marked_objects_.end()) {
// Already processed.
@@ -1983,7 +1983,7 @@
// A fast version of SkipClass above if the class pointer is available
// that avoids the expensive FindInClassPath search.
static bool SkipClass(jobject class_loader, const DexFile& dex_file, mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(klass != nullptr);
const DexFile& original_dex_file = *klass->GetDexCache()->GetDexFile();
if (&dex_file != &original_dex_file) {
@@ -1998,7 +1998,7 @@
}
static void CheckAndClearResolveException(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(self->IsExceptionPending());
mirror::Throwable* exception = self->GetException();
std::string temp;
@@ -2289,7 +2289,7 @@
&dex_file,
dex_cache,
class_loader,
- &class_def,
+ class_def,
Runtime::Current()->GetCompilerCallbacks(),
true /* allow soft failures */,
log_level_,
@@ -2529,7 +2529,7 @@
class InitializeArrayClassesAndCreateConflictTablesVisitor : public ClassVisitor {
public:
- virtual bool operator()(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ virtual bool operator()(mirror::Class* klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
return true;
}
@@ -2546,7 +2546,7 @@
}
private:
- void FillIMTAndConflictTables(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void FillIMTAndConflictTables(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
if (!klass->ShouldHaveImt()) {
return;
}
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 2dd4651..fbc1edd 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -125,7 +125,7 @@
// Compile a single Method.
void CompileOne(Thread* self, ArtMethod* method, TimingLogger* timings)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!compiled_methods_lock_, !compiled_classes_lock_, !dex_to_dex_references_lock_);
VerificationResults* GetVerificationResults() const {
@@ -199,7 +199,7 @@
bool CanAssumeTypeIsPresentInDexCache(Handle<mirror::DexCache> dex_cache,
uint32_t type_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool CanAssumeStringIsPresentInDexCache(const DexFile& dex_file, uint32_t string_idx)
REQUIRES(!Locks::mutator_lock_);
@@ -208,7 +208,7 @@
bool CanAccessTypeWithoutChecks(uint32_t referrer_idx,
Handle<mirror::DexCache> dex_cache,
uint32_t type_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Are runtime access and instantiable checks necessary in the code?
// out_is_finalizable is set to whether the type is finalizable.
@@ -216,7 +216,7 @@
Handle<mirror::DexCache> dex_cache,
uint32_t type_idx,
bool* out_is_finalizable)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_idx,
bool* is_type_initialized, bool* use_direct_type_ptr,
@@ -230,23 +230,23 @@
// Get the DexCache for the
mirror::DexCache* GetDexCache(const DexCompilationUnit* mUnit)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa,
const DexCompilationUnit* mUnit)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Resolve compiling method's class. Returns null on failure.
mirror::Class* ResolveCompilingMethodsClass(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Class* ResolveClass(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, uint16_t type_index,
const DexCompilationUnit* mUnit)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Resolve a field. Returns null on failure, including incompatible class change.
// NOTE: Unlike ClassLinker's ResolveField(), this method enforces is_static.
@@ -254,40 +254,40 @@
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
uint32_t field_idx, bool is_static)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Resolve a field with a given dex file.
ArtField* ResolveFieldWithDexFile(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexFile* dex_file,
uint32_t field_idx, bool is_static)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get declaration location of a resolved field.
void GetResolvedFieldDexFileLocation(
ArtField* resolved_field, const DexFile** declaring_dex_file,
uint16_t* declaring_class_idx, uint16_t* declaring_field_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsFieldVolatile(ArtField* field) SHARED_REQUIRES(Locks::mutator_lock_);
- MemberOffset GetFieldOffset(ArtField* field) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsFieldVolatile(ArtField* field) REQUIRES_SHARED(Locks::mutator_lock_);
+ MemberOffset GetFieldOffset(ArtField* field) REQUIRES_SHARED(Locks::mutator_lock_);
// Find a dex cache for a dex file.
inline mirror::DexCache* FindDexCache(const DexFile* dex_file)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can we fast-path an IGET/IPUT access to an instance field? If yes, compute the field offset.
std::pair<bool, bool> IsFastInstanceField(
mirror::DexCache* dex_cache, mirror::Class* referrer_class,
ArtField* resolved_field, uint16_t field_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can we fast-path an SGET/SPUT access to a static field? If yes, compute the type index
// of the declaring class in the referrer's dex file.
std::pair<bool, bool> IsFastStaticField(
mirror::DexCache* dex_cache, mirror::Class* referrer_class,
ArtField* resolved_field, uint16_t field_idx, uint32_t* storage_index)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Return whether the declaring class of `resolved_method` is
// available to `referrer_class`. If this is true, compute the type
@@ -299,34 +299,34 @@
ArtMethod* resolved_method,
uint16_t method_idx,
uint32_t* storage_index)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Is static field's in referrer's class?
bool IsStaticFieldInReferrerClass(mirror::Class* referrer_class, ArtField* resolved_field)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Is static field's class initialized?
bool IsStaticFieldsClassInitialized(mirror::Class* referrer_class,
ArtField* resolved_field)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Resolve a method. Returns null on failure, including incompatible class change.
ArtMethod* ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change = true)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get declaration location of a resolved field.
void GetResolvedMethodDexFileLocation(
ArtMethod* resolved_method, const DexFile** declaring_dex_file,
uint16_t* declaring_class_idx, uint16_t* declaring_method_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get the index in the vtable of the method.
uint16_t GetResolvedMethodVTableIndex(
ArtMethod* resolved_method, InvokeType type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can we fast-path an INVOKE? If no, returns 0. If yes, returns a non-zero opaque flags value
// for ProcessedInvoke() and computes the necessary lowering info.
@@ -336,13 +336,13 @@
mirror::Class* referrer_class, ArtMethod* resolved_method, InvokeType* invoke_type,
MethodReference* target_method, const MethodReference* devirt_target,
uintptr_t* direct_code, uintptr_t* direct_method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Is method's class initialized for an invoke?
// For static invokes to determine whether we need to consider potential call to <clinit>().
// For non-static invokes, assuming a non-null reference, the class is always initialized.
bool IsMethodsClassInitialized(mirror::Class* referrer_class, ArtMethod* resolved_method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get the layout of dex cache arrays for a dex file. Returns invalid layout if the
// dex cache arrays don't have a fixed layout.
@@ -357,7 +357,7 @@
ArtField** resolved_field,
mirror::Class** referrer_class,
mirror::DexCache** dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can we fast path instance field access? Computes field's offset and volatility.
bool ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put,
@@ -368,7 +368,7 @@
const DexCompilationUnit* mUnit,
bool is_put,
const ScopedObjectAccess& soa)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can we fastpath a interface, super class or virtual method call? Computes method's vtable
@@ -467,7 +467,7 @@
// Can we assume that the klass is loaded?
bool CanAssumeClassIsLoaded(mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool MayInline(const DexFile* inlined_from, const DexFile* inlined_into) const {
if (!kIsTargetBuild) {
@@ -497,7 +497,7 @@
ArtMember* resolved_member,
uint16_t member_idx,
uint32_t* storage_index)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can `referrer_class` access the resolved `member`?
// Dispatch call to mirror::Class::CanAccessResolvedField or
@@ -509,13 +509,13 @@
ArtMember* member,
mirror::DexCache* dex_cache,
uint32_t field_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can we assume that the klass is initialized?
bool CanAssumeClassIsInitialized(mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool CanReferrerAssumeClassIsInitialized(mirror::Class* referrer_class, mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// These flags are internal to CompilerDriver for collecting INVOKE resolution statistics.
// The only external contract is that unresolved method has flags 0 and resolved non-0.
@@ -546,7 +546,7 @@
/*out*/int* stats_flags,
MethodReference* target_method,
uintptr_t* direct_code, uintptr_t* direct_method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
void PreCompile(jobject class_loader,
@@ -605,7 +605,7 @@
void UpdateImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_);
static void FindClinitImageClassesCallback(mirror::Object* object, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void Compile(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc
index 449f514..d5f1663 100644
--- a/compiler/elf_writer_test.cc
+++ b/compiler/elf_writer_test.cc
@@ -38,16 +38,16 @@
#define EXPECT_ELF_FILE_ADDRESS(ef, expected_value, symbol_name, build_map) \
do { \
- void* addr = reinterpret_cast<void*>(ef->FindSymbolAddress(SHT_DYNSYM, \
- symbol_name, \
- build_map)); \
+ void* addr = reinterpret_cast<void*>((ef)->FindSymbolAddress(SHT_DYNSYM, \
+ symbol_name, \
+ build_map)); \
EXPECT_NE(nullptr, addr); \
EXPECT_LT(static_cast<uintptr_t>(ART_BASE_ADDRESS), reinterpret_cast<uintptr_t>(addr)); \
- if (expected_value == nullptr) { \
- expected_value = addr; \
+ if ((expected_value) == nullptr) { \
+ (expected_value) = addr; \
} \
EXPECT_EQ(expected_value, addr); \
- EXPECT_EQ(expected_value, ef->FindDynamicSymbolAddress(symbol_name)); \
+ EXPECT_EQ(expected_value, (ef)->FindDynamicSymbolAddress(symbol_name)); \
} while (false)
TEST_F(ElfWriterTest, dlsym) {
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index bb45999..7634510 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -111,7 +111,7 @@
}
static void CheckNoDexObjectsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Class* klass = obj->GetClass();
CHECK_NE(PrettyClass(klass), "com.android.dex.Dex");
}
@@ -687,7 +687,7 @@
class ComputeLazyFieldsForClassesVisitor : public ClassVisitor {
public:
- bool operator()(Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool operator()(Class* c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(Thread::Current());
mirror::Class::ComputeName(hs.NewHandle(c));
return true;
@@ -700,7 +700,7 @@
class_linker->VisitClassesWithoutClassesLock(&visitor);
}
-static bool IsBootClassLoaderClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) {
+static bool IsBootClassLoaderClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
return klass->GetClassLoader() == nullptr;
}
@@ -828,7 +828,7 @@
public:
explicit NonImageClassesVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {}
- bool operator()(Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool operator()(Class* klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (!image_writer_->KeepClass(klass)) {
classes_to_prune_.insert(klass);
}
@@ -1603,7 +1603,7 @@
}
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
*roots[i] = image_writer_->GetImageAddress(*roots[i]);
}
@@ -1611,7 +1611,7 @@
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
roots[i]->Assign(image_writer_->GetImageAddress(roots[i]->AsMirrorPtr()));
}
@@ -1864,7 +1864,7 @@
// java.lang.ref.Reference visitor.
void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
mirror::Reference::ReferentOffset(),
image_writer_->GetImageAddress(ref->GetReferent()));
@@ -1888,7 +1888,7 @@
void operator()(mirror::Class* klass ATTRIBUTE_UNUSED,
mirror::Reference* ref ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
LOG(FATAL) << "Reference not expected here.";
}
};
@@ -1904,14 +1904,14 @@
}
template <typename T>
-std::string PrettyPrint(T* ptr) SHARED_REQUIRES(Locks::mutator_lock_) {
+std::string PrettyPrint(T* ptr) REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostringstream oss;
oss << ptr;
return oss.str();
}
template <>
-std::string PrettyPrint(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) {
+std::string PrettyPrint(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
return PrettyMethod(method);
}
@@ -1945,7 +1945,7 @@
explicit NativeLocationVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {}
template <typename T>
- T* operator()(T* ptr) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ T* operator()(T* ptr) const REQUIRES_SHARED(Locks::mutator_lock_) {
return image_writer_->NativeLocationInImage(ptr);
}
@@ -2023,7 +2023,7 @@
explicit ImageAddressVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {}
template <typename T>
- T* operator()(T* ptr) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ T* operator()(T* ptr) const REQUIRES_SHARED(Locks::mutator_lock_) {
return image_writer_->GetImageAddress(ptr);
}
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 7d13656..76749cf 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -77,7 +77,7 @@
}
template <typename T>
- T* GetImageAddress(T* object) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ T* GetImageAddress(T* object) const REQUIRES_SHARED(Locks::mutator_lock_) {
if (object == nullptr || IsInBootImage(object)) {
return object;
} else {
@@ -87,11 +87,11 @@
}
}
- ArtMethod* GetImageMethodAddress(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod* GetImageMethodAddress(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
template <typename PtrType>
PtrType GetDexCacheArrayElementImageAddress(const DexFile* dex_file, uint32_t offset)
- const SHARED_REQUIRES(Locks::mutator_lock_) {
+ const REQUIRES_SHARED(Locks::mutator_lock_) {
auto oat_it = dex_file_oat_index_map_.find(dex_file);
DCHECK(oat_it != dex_file_oat_index_map_.end());
const ImageInfo& image_info = GetImageInfo(oat_it->second);
@@ -132,7 +132,7 @@
// Get the index of the oat file containing the dex file served by the dex cache.
size_t GetOatIndexForDexCache(mirror::DexCache* dex_cache) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Update the oat layout for the given oat file.
// This will make the oat_offset for the next oat file valid.
@@ -147,7 +147,7 @@
bool AllocMemory();
// Mark the objects defined in this space in the given live bitmap.
- void RecordImageAllocations() SHARED_REQUIRES(Locks::mutator_lock_);
+ void RecordImageAllocations() REQUIRES_SHARED(Locks::mutator_lock_);
// Classify different kinds of bins that objects end up getting packed into during image writing.
// Ordered from dirtiest to cleanest (until ArtMethods).
@@ -311,34 +311,34 @@
// We use the lock word to store the offset of the object in the image.
void AssignImageOffset(mirror::Object* object, BinSlot bin_slot)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetImageOffset(mirror::Object* object, size_t offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsImageOffsetAssigned(mirror::Object* object) const
- SHARED_REQUIRES(Locks::mutator_lock_);
- size_t GetImageOffset(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ size_t GetImageOffset(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);
void UpdateImageOffset(mirror::Object* obj, uintptr_t offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- void PrepareDexCacheArraySlots() SHARED_REQUIRES(Locks::mutator_lock_);
- void AssignImageBinSlot(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ void PrepareDexCacheArraySlots() REQUIRES_SHARED(Locks::mutator_lock_);
+ void AssignImageBinSlot(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
void SetImageBinSlot(mirror::Object* object, BinSlot bin_slot)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsImageBinSlotAssigned(mirror::Object* object) const
- SHARED_REQUIRES(Locks::mutator_lock_);
- BinSlot GetImageBinSlot(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ BinSlot GetImageBinSlot(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);
void AddDexCacheArrayRelocation(void* array, size_t offset, mirror::DexCache* dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_);
- void AddMethodPointerArray(mirror::PointerArray* arr) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void AddMethodPointerArray(mirror::PointerArray* arr) REQUIRES_SHARED(Locks::mutator_lock_);
static void* GetImageAddressCallback(void* writer, mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return reinterpret_cast<ImageWriter*>(writer)->GetImageAddress(obj);
}
mirror::Object* GetLocalAddress(mirror::Object* object) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
size_t offset = GetImageOffset(object);
size_t oat_index = GetOatIndex(object);
const ImageInfo& image_info = GetImageInfo(oat_index);
@@ -358,94 +358,94 @@
}
// Returns true if the class was in the original requested image classes list.
- bool KeepClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool KeepClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
// Debug aid that list of requested image classes.
void DumpImageClasses();
// Preinitializes some otherwise lazy fields (such as Class name) to avoid runtime image dirtying.
void ComputeLazyFieldsForImageClasses()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Remove unwanted classes from various roots.
- void PruneNonImageClasses() SHARED_REQUIRES(Locks::mutator_lock_);
+ void PruneNonImageClasses() REQUIRES_SHARED(Locks::mutator_lock_);
// Verify unwanted classes removed.
- void CheckNonImageClassesRemoved() SHARED_REQUIRES(Locks::mutator_lock_);
+ void CheckNonImageClassesRemoved() REQUIRES_SHARED(Locks::mutator_lock_);
static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Lays out where the image objects will be at runtime.
void CalculateNewObjectOffsets()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CreateHeader(size_t oat_index)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* CreateImageRoots(size_t oat_index) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CalculateObjectBinSlots(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void UnbinObjectsIntoOffset(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void WalkInstanceFields(mirror::Object* obj, mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void WalkFieldsInOrder(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void WalkFieldsCallback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Creates the contiguous image in memory and adjusts pointers.
- void CopyAndFixupNativeData(size_t oat_index) SHARED_REQUIRES(Locks::mutator_lock_);
- void CopyAndFixupObjects() SHARED_REQUIRES(Locks::mutator_lock_);
+ void CopyAndFixupNativeData(size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_);
+ void CopyAndFixupObjects() REQUIRES_SHARED(Locks::mutator_lock_);
static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_);
- void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void CopyAndFixupObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
- SHARED_REQUIRES(Locks::mutator_lock_);
- void CopyAndFixupImTable(ImTable* orig, ImTable* copy) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void CopyAndFixupImTable(ImTable* orig, ImTable* copy) REQUIRES_SHARED(Locks::mutator_lock_);
void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void FixupClass(mirror::Class* orig, mirror::Class* copy)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void FixupObject(mirror::Object* orig, mirror::Object* copy)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void FixupDexCache(mirror::DexCache* orig_dex_cache, mirror::DexCache* copy_dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void FixupPointerArray(mirror::Object* dst,
mirror::PointerArray* arr,
mirror::Class* klass,
Bin array_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get quick code for non-resolution/imt_conflict/abstract method.
const uint8_t* GetQuickCode(ArtMethod* method,
const ImageInfo& image_info,
bool* quick_is_interpreted)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Calculate the sum total of the bin slot sizes in [0, up_to). Defaults to all bins.
size_t GetBinSizeSum(ImageInfo& image_info, Bin up_to = kBinSize) const;
// Return true if a method is likely to be dirtied at runtime.
- bool WillMethodBeDirty(ArtMethod* m) const SHARED_REQUIRES(Locks::mutator_lock_);
+ bool WillMethodBeDirty(ArtMethod* m) const REQUIRES_SHARED(Locks::mutator_lock_);
// Assign the offset for an ArtMethod.
void AssignMethodOffset(ArtMethod* method,
NativeObjectRelocationType type,
size_t oat_index)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- void TryAssignImTableOffset(ImTable* imt, size_t oat_index) SHARED_REQUIRES(Locks::mutator_lock_);
+ void TryAssignImTableOffset(ImTable* imt, size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_);
// Assign the offset for an IMT conflict table. Does nothing if the table already has a native
// relocation.
void TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Return true if klass is loaded by the boot class loader but not in the boot image.
- bool IsBootClassLoaderNonImageClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsBootClassLoaderNonImageClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
// Return true if klass depends on a boot class loader non image class. We want to prune these
// classes since we do not want any boot class loader classes in the image. This means that
@@ -453,25 +453,25 @@
// PruneAppImageClass also prunes if klass depends on a non-image class according to the compiler
// driver.
bool PruneAppImageClass(mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// early_exit is true if we had a cyclic dependency anywhere down the chain.
bool PruneAppImageClassInternal(mirror::Class* klass,
bool* early_exit,
std::unordered_set<mirror::Class*>* visited)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static Bin BinTypeForNativeRelocationType(NativeObjectRelocationType type);
- uintptr_t NativeOffsetInImage(void* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ uintptr_t NativeOffsetInImage(void* obj) REQUIRES_SHARED(Locks::mutator_lock_);
// Location of where the object will be when the image is loaded at runtime.
template <typename T>
- T* NativeLocationInImage(T* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ T* NativeLocationInImage(T* obj) REQUIRES_SHARED(Locks::mutator_lock_);
// Location of where the temporary copy of the object currently is.
template <typename T>
- T* NativeCopyLocation(T* obj, mirror::DexCache* dex_cache) SHARED_REQUIRES(Locks::mutator_lock_);
+ T* NativeCopyLocation(T* obj, mirror::DexCache* dex_cache) REQUIRES_SHARED(Locks::mutator_lock_);
// Return true of obj is inside of the boot image space. This may only return true if we are
// compiling an app image.
@@ -481,7 +481,7 @@
bool IsInBootOatFile(const void* ptr) const;
// Get the index of the oat file associated with the object.
- size_t GetOatIndex(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
+ size_t GetOatIndex(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);
// The oat index for shared data in multi-image and all data in single-image compilation.
size_t GetDefaultOatIndex() const {
@@ -498,7 +498,7 @@
// Find an already strong interned string in the other images or in the boot image. Used to
// remove duplicates in the multi image and app image case.
- mirror::String* FindInternedString(mirror::String* string) SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::String* FindInternedString(mirror::String* string) REQUIRES_SHARED(Locks::mutator_lock_);
// Return true if there already exists a native allocation for an object.
bool NativeRelocationAssigned(void* ptr) const;
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 6f6a8f5..7246ace 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -58,14 +58,14 @@
extern "C" bool jit_compile_method(
void* handle, ArtMethod* method, Thread* self, bool osr)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
DCHECK(jit_compiler != nullptr);
return jit_compiler->CompileMethod(self, method, osr);
}
extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t count)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
DCHECK(jit_compiler != nullptr);
if (jit_compiler->GetCompilerOptions()->GetGenerateDebugInfo()) {
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index 533dccf..18e3155 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -37,7 +37,7 @@
// Compilation entrypoint. Returns whether the compilation succeeded.
bool CompileMethod(Thread* self, ArtMethod* method, bool osr)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
CompilerOptions* GetCompilerOptions() const {
return compiler_options_.get();
@@ -59,7 +59,7 @@
// This is in the compiler since the runtime doesn't have access to the compiled method
// structures.
bool AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
DISALLOW_COPY_AND_ASSIGN(JitCompiler);
};
diff --git a/compiler/jni/jni_cfi_test_expected.inc b/compiler/jni/jni_cfi_test_expected.inc
index da72c75..a205800 100644
--- a/compiler/jni/jni_cfi_test_expected.inc
+++ b/compiler/jni/jni_cfi_test_expected.inc
@@ -332,14 +332,14 @@
0x20, 0x00, 0xBD, 0x27, 0x20, 0x00, 0xB2, 0x8F, 0x24, 0x00, 0xB3, 0x8F,
0x28, 0x00, 0xB4, 0x8F, 0x2C, 0x00, 0xB5, 0x8F, 0x30, 0x00, 0xB6, 0x8F,
0x34, 0x00, 0xB7, 0x8F, 0x38, 0x00, 0xBE, 0x8F, 0x3C, 0x00, 0xBF, 0x8F,
- 0x40, 0x00, 0xBD, 0x27, 0x09, 0x00, 0xE0, 0x03, 0x00, 0x00, 0x00, 0x00,
+ 0x09, 0x00, 0xE0, 0x03, 0x40, 0x00, 0xBD, 0x27,
};
static constexpr uint8_t expected_cfi_kMips[] = {
0x44, 0x0E, 0x40, 0x44, 0x9F, 0x01, 0x44, 0x9E, 0x02, 0x44, 0x97, 0x03,
0x44, 0x96, 0x04, 0x44, 0x95, 0x05, 0x44, 0x94, 0x06, 0x44, 0x93, 0x07,
0x44, 0x92, 0x08, 0x58, 0x0E, 0x60, 0x44, 0x0E, 0x40, 0x0A, 0x44, 0xD2,
0x44, 0xD3, 0x44, 0xD4, 0x44, 0xD5, 0x44, 0xD6, 0x44, 0xD7, 0x44, 0xDE,
- 0x44, 0xDF, 0x44, 0x0E, 0x00, 0x48, 0x0B, 0x0E, 0x40,
+ 0x44, 0xDF, 0x48, 0x0E, 0x00, 0x0B, 0x0E, 0x40,
};
// 0x00000000: addiu r29, r29, -64
// 0x00000004: .cfi_def_cfa_offset: 64
@@ -385,12 +385,11 @@
// 0x0000005c: .cfi_restore: r30
// 0x0000005c: lw r31, +60(r29)
// 0x00000060: .cfi_restore: r31
-// 0x00000060: addiu r29, r29, 64
-// 0x00000064: .cfi_def_cfa_offset: 0
-// 0x00000064: jr r31
-// 0x00000068: nop
-// 0x0000006c: .cfi_restore_state
-// 0x0000006c: .cfi_def_cfa_offset: 64
+// 0x00000060: jr r31
+// 0x00000064: addiu r29, r29, 64
+// 0x00000068: .cfi_def_cfa_offset: 0
+// 0x00000068: .cfi_restore_state
+// 0x00000068: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kMips64[] = {
0x90, 0xFF, 0xBD, 0x67, 0x68, 0x00, 0xBF, 0xFF, 0x60, 0x00, 0xBE, 0xFF,
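
In the MIPS expectations, the assembler now schedules the stack-pointer restore into the branch delay slot of jr r31 and drops the trailing nop, so the CFI byte stream shrinks by one opcode to match. Decoding the new tail bytes with the standard DWARF CFA opcodes (plain illustration, assuming a code alignment factor of 1; not an ART API):

#include <cstdint>
#include <cstdio>

int main() {
  // Tail of the updated expected_cfi_kMips[] above.
  const uint8_t tail[] = {0x44, 0xDF, 0x48, 0x0E, 0x00, 0x0B, 0x0E, 0x40};
  for (unsigned i = 0; i < sizeof(tail); ++i) {
    const uint8_t op = tail[i];
    if ((op & 0xC0) == 0x40) {         // DW_CFA_advance_loc: delta in the low 6 bits
      std::printf("advance_loc %u\n", op & 0x3Fu);
    } else if ((op & 0xC0) == 0xC0) {  // DW_CFA_restore: register in the low 6 bits
      std::printf("restore r%u\n", op & 0x3Fu);
    } else if (op == 0x0E) {           // DW_CFA_def_cfa_offset, one ULEB128 operand
      std::printf("def_cfa_offset %u\n", static_cast<unsigned>(tail[++i]));
    } else if (op == 0x0B) {           // DW_CFA_restore_state
      std::printf("restore_state\n");
    }
  }
  // Prints: advance_loc 4, restore r31, advance_loc 8, def_cfa_offset 0,
  // restore_state, def_cfa_offset 64. The single 8-byte advance now spans
  // "jr r31" plus the addiu sitting in its delay slot.
  return 0;
}
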
diff --git a/compiler/linker/arm64/relative_patcher_arm64_test.cc b/compiler/linker/arm64/relative_patcher_arm64_test.cc
index 573de73..9932c79 100644
--- a/compiler/linker/arm64/relative_patcher_arm64_test.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64_test.cc
@@ -701,7 +701,7 @@
#define DEFAULT_LDUR_LDR_TEST(adrp_offset, disp) \
TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference ## adrp_offset ## Ldur ## disp) { \
- bool has_thunk = (adrp_offset == 0xff8u || adrp_offset == 0xffcu); \
+ bool has_thunk = ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu); \
TestAdrpLdurLdr(adrp_offset, has_thunk, 0x12345678u, disp); \
}
@@ -725,8 +725,8 @@
// LDR <Xt>, <label> is aligned when offset + displacement is a multiple of 8.
#define LDRX_PCREL_LDR_TEST(adrp_offset, disp) \
TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference ## adrp_offset ## XPcRel ## disp) { \
- bool unaligned = !IsAligned<8u>(adrp_offset + 4u + static_cast<uint32_t>(disp)); \
- bool has_thunk = (adrp_offset == 0xff8u || adrp_offset == 0xffcu) && unaligned; \
+ bool unaligned = !IsAligned<8u>((adrp_offset) + 4u + static_cast<uint32_t>(disp)); \
+ bool has_thunk = ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu) && unaligned; \
TestAdrpLdrPcRelLdr(kLdrXPcRelInsn, disp, adrp_offset, has_thunk, 0x12345678u, 0x1234u); \
}
@@ -735,21 +735,21 @@
// LDR <Wt>, [SP, #<pimm>] and LDR <Xt>, [SP, #<pimm>] are always aligned. No fixup needed.
#define LDRW_SPREL_LDR_TEST(adrp_offset, disp) \
TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference ## adrp_offset ## WSpRel ## disp) { \
- TestAdrpLdrSpRelLdr(kLdrWSpRelInsn, disp >> 2, adrp_offset, false, 0x12345678u, 0x1234u); \
+ TestAdrpLdrSpRelLdr(kLdrWSpRelInsn, (disp) >> 2, adrp_offset, false, 0x12345678u, 0x1234u); \
}
TEST_FOR_OFFSETS(LDRW_SPREL_LDR_TEST, 0, 4)
#define LDRX_SPREL_LDR_TEST(adrp_offset, disp) \
TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference ## adrp_offset ## XSpRel ## disp) { \
- TestAdrpLdrSpRelLdr(kLdrXSpRelInsn, disp >> 3, adrp_offset, false, 0x12345678u, 0x1234u); \
+ TestAdrpLdrSpRelLdr(kLdrXSpRelInsn, (disp) >> 3, adrp_offset, false, 0x12345678u, 0x1234u); \
}
TEST_FOR_OFFSETS(LDRX_SPREL_LDR_TEST, 0, 8)
#define DEFAULT_LDUR_ADD_TEST(adrp_offset, disp) \
TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## Ldur ## disp) { \
- bool has_thunk = (adrp_offset == 0xff8u || adrp_offset == 0xffcu); \
+ bool has_thunk = ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu); \
TestAdrpLdurAdd(adrp_offset, has_thunk, disp); \
}
@@ -793,7 +793,7 @@
TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## AddsX0X2 ## disp) { \
/* ADDS that does not use the result of "ADRP x0, addr" but overwrites that register. */ \
uint32_t adds = kAddsXInsn | (100 << 10) | (2u << 5) | 0u; /* ADDS x0, x2, #100 */ \
- bool has_thunk = (adrp_offset == 0xff8u || adrp_offset == 0xffcu); \
+ bool has_thunk = ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu); \
TestAdrpInsn2Add(adds, adrp_offset, has_thunk, disp); \
}
@@ -810,8 +810,8 @@
// LDR <Xt>, <label> is aligned when offset + displacement is a multiple of 8.
#define LDRX_PCREL_ADD_TEST(adrp_offset, disp) \
TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## XPcRel ## disp) { \
- bool unaligned = !IsAligned<8u>(adrp_offset + 4u + static_cast<uint32_t>(disp)); \
- bool has_thunk = (adrp_offset == 0xff8u || adrp_offset == 0xffcu) && unaligned; \
+ bool unaligned = !IsAligned<8u>((adrp_offset) + 4u + static_cast<uint32_t>(disp)); \
+ bool has_thunk = ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu) && unaligned; \
TestAdrpLdrPcRelAdd(kLdrXPcRelInsn, disp, adrp_offset, has_thunk, 0x12345678u); \
}
@@ -820,14 +820,14 @@
// LDR <Wt>, [SP, #<pimm>] and LDR <Xt>, [SP, #<pimm>] are always aligned. No fixup needed.
#define LDRW_SPREL_ADD_TEST(adrp_offset, disp) \
TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## WSpRel ## disp) { \
- TestAdrpLdrSpRelAdd(kLdrWSpRelInsn, disp >> 2, adrp_offset, false, 0x12345678u); \
+ TestAdrpLdrSpRelAdd(kLdrWSpRelInsn, (disp) >> 2, adrp_offset, false, 0x12345678u); \
}
TEST_FOR_OFFSETS(LDRW_SPREL_ADD_TEST, 0, 4)
#define LDRX_SPREL_ADD_TEST(adrp_offset, disp) \
TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## XSpRel ## disp) { \
- TestAdrpLdrSpRelAdd(kLdrXSpRelInsn, disp >> 3, adrp_offset, false, 0x12345678u); \
+ TestAdrpLdrSpRelAdd(kLdrXSpRelInsn, (disp) >> 3, adrp_offset, false, 0x12345678u); \
}
TEST_FOR_OFFSETS(LDRX_SPREL_ADD_TEST, 0, 8)
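
The extra parentheses around each macro parameter use keep compound arguments from rebinding to the surrounding operators. A stand-alone illustration of the failure mode with a hypothetical macro shaped like the ones above:

#include <iostream>

#define HAS_THUNK_BAD(adrp_offset)  (adrp_offset == 0xff8u || adrp_offset == 0xffcu)
#define HAS_THUNK_GOOD(adrp_offset) ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu)

int main() {
  unsigned base = 0x100u;
  // The '|' in the argument binds more loosely than '==', so the unparenthesized
  // expansion becomes (base | (8u == 0xff8u)) || ..., which is true for any
  // non-zero base regardless of the offset actually being tested.
  std::cout << HAS_THUNK_BAD(base | 8u) << "\n";   // prints 1 (wrong)
  std::cout << HAS_THUNK_GOOD(base | 8u) << "\n";  // prints 0 (0x108 is neither 0xff8 nor 0xffc)
  return 0;
}
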
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index bf53bb2..b1e3811 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -60,7 +60,7 @@
void CheckMethod(ArtMethod* method,
const OatFile::OatMethod& oat_method,
const DexFile& dex_file)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const CompiledMethod* compiled_method =
compiler_driver_->GetCompiledMethod(MethodReference(&dex_file,
method->GetDexMethodIndex()));
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 8a80982..c9c5d24 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -226,7 +226,6 @@
return dex_file_location_data_;
}
- void ReserveTypeLookupTable(OatWriter* oat_writer);
void ReserveClassOffsets(OatWriter* oat_writer);
size_t SizeOf() const;
@@ -436,36 +435,35 @@
instruction_set_features,
dchecked_integral_cast<uint32_t>(oat_dex_files_.size()),
key_value_store);
- offset = InitOatDexFiles(offset);
- size_ = offset;
+ size_ = InitOatDexFiles(offset);
std::unique_ptr<MemMap> dex_files_map;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- if (!WriteDexFiles(rodata, file)) {
+ if (!WriteDexFiles(rodata, file) ||
+ !OpenDexFiles(file, verify, &dex_files_map, &dex_files)) {
return false;
}
- // Reserve space for type lookup tables and update type_lookup_table_offset_.
- for (OatDexFile& oat_dex_file : oat_dex_files_) {
- oat_dex_file.ReserveTypeLookupTable(this);
+
+ // Do a bulk checksum update for Dex[]. Doing it piece by piece would be
+ // difficult because we're not using the OutputStream directly.
+ if (!oat_dex_files_.empty()) {
+ size_t size = size_ - oat_dex_files_[0].dex_file_offset_;
+ oat_header_->UpdateChecksum(dex_files_map->Begin(), size);
}
- size_t size_after_type_lookup_tables = size_;
+
+ ChecksumUpdatingOutputStream checksum_updating_rodata(rodata, oat_header_.get());
+
+ if (!WriteTypeLookupTables(&checksum_updating_rodata, dex_files)) {
+ return false;
+ }
+
// Reserve space for class offsets and update class_offsets_offset_.
for (OatDexFile& oat_dex_file : oat_dex_files_) {
oat_dex_file.ReserveClassOffsets(this);
}
- ChecksumUpdatingOutputStream checksum_updating_rodata(rodata, oat_header_.get());
- if (!WriteOatDexFiles(&checksum_updating_rodata) ||
- !ExtendForTypeLookupTables(rodata, file, size_after_type_lookup_tables) ||
- !OpenDexFiles(file, verify, &dex_files_map, &dex_files) ||
- !WriteTypeLookupTables(dex_files_map.get(), dex_files)) {
- return false;
- }
- // Do a bulk checksum update for Dex[] and TypeLookupTable[]. Doing it piece by
- // piece would be difficult because we're not using the OutpuStream directly.
- if (!oat_dex_files_.empty()) {
- size_t size = size_after_type_lookup_tables - oat_dex_files_[0].dex_file_offset_;
- oat_header_->UpdateChecksum(dex_files_map->Begin(), size);
+ if (!WriteOatDexFiles(&checksum_updating_rodata)) {
+ return false;
}
*opened_dex_files_map = std::move(dex_files_map);
@@ -684,7 +682,7 @@
}
bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -845,7 +843,7 @@
}
bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -887,7 +885,7 @@
}
bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile::TypeId& type_id =
dex_file_->GetTypeId(dex_file_->GetClassDef(class_def_index_).class_idx_);
const char* class_descriptor = dex_file_->GetTypeDescriptor(type_id);
@@ -974,7 +972,7 @@
}
bool StartClass(const DexFile* dex_file, size_t class_def_index)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
OatDexMethodVisitor::StartClass(dex_file, class_def_index);
if (dex_cache_ == nullptr || dex_cache_->GetDexFile() != dex_file) {
dex_cache_ = class_linker_->FindDexCache(Thread::Current(), *dex_file);
@@ -983,7 +981,7 @@
return true;
}
- bool EndClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool EndClass() REQUIRES_SHARED(Locks::mutator_lock_) {
bool result = OatDexMethodVisitor::EndClass();
if (oat_class_index_ == writer_->oat_classes_.size()) {
DCHECK(result); // OatDexMethodVisitor::EndClass() never fails.
@@ -997,7 +995,7 @@
}
bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -1138,7 +1136,7 @@
}
ArtMethod* GetTargetMethod(const LinkerPatch& patch)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
MethodReference ref = patch.TargetMethod();
mirror::DexCache* dex_cache =
(dex_file_ == ref.dex_file) ? dex_cache_ : class_linker_->FindDexCache(
@@ -1149,7 +1147,7 @@
return method;
}
- uint32_t GetTargetOffset(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t GetTargetOffset(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t target_offset = writer_->relative_patcher_->GetOffset(patch.TargetMethod());
// If there's no new compiled code, either we're compiling an app and the target method
// is in the boot image, or we need to point to the correct trampoline.
@@ -1175,20 +1173,20 @@
}
mirror::DexCache* GetDexCache(const DexFile* target_dex_file)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return (target_dex_file == dex_file_)
? dex_cache_
: class_linker_->FindDexCache(Thread::Current(), *target_dex_file);
}
- mirror::Class* GetTargetType(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Class* GetTargetType(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::DexCache* dex_cache = GetDexCache(patch.TargetTypeDexFile());
mirror::Class* type = dex_cache->GetResolvedType(patch.TargetTypeIndex());
CHECK(type != nullptr);
return type;
}
- mirror::String* GetTargetString(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::String* GetTargetString(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedObjectAccessUnchecked soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
ClassLinker* linker = Runtime::Current()->GetClassLinker();
@@ -1202,7 +1200,7 @@
return string;
}
- uint32_t GetDexCacheOffset(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t GetDexCacheOffset(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) {
if (writer_->HasBootImage()) {
uintptr_t element = writer_->image_writer_->GetDexCacheArrayElementImageAddress<uintptr_t>(
patch.TargetDexCacheDexFile(), patch.TargetDexCacheElementOffset());
@@ -1215,7 +1213,7 @@
}
}
- uint32_t GetTargetObjectOffset(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t GetTargetObjectOffset(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(writer_->HasBootImage());
object = writer_->image_writer_->GetImageAddress(object);
size_t oat_index = writer_->image_writer_->GetOatIndexForDexFile(dex_file_);
@@ -1225,7 +1223,7 @@
}
void PatchObjectAddress(std::vector<uint8_t>* code, uint32_t offset, mirror::Object* object)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (writer_->HasBootImage()) {
object = writer_->image_writer_->GetImageAddress(object);
} else {
@@ -1245,7 +1243,7 @@
}
void PatchMethodAddress(std::vector<uint8_t>* code, uint32_t offset, ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (writer_->HasBootImage()) {
method = writer_->image_writer_->GetImageMethodAddress(method);
} else if (kIsDebugBuild) {
@@ -1273,7 +1271,7 @@
}
void PatchCodeAddress(std::vector<uint8_t>* code, uint32_t offset, uint32_t target_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t address = target_offset;
if (writer_->HasBootImage()) {
size_t oat_index = writer_->image_writer_->GetOatIndexForDexCache(dex_cache_);
@@ -1525,7 +1523,7 @@
off_t tables_end_offset = out->Seek(0, kSeekCurrent);
if (tables_end_offset == static_cast<off_t>(-1)) {
- LOG(ERROR) << "Failed to seek to oat code position in " << out->GetLocation();
+ LOG(ERROR) << "Failed to get oat code position in " << out->GetLocation();
return false;
}
size_t file_offset = oat_data_offset_;
@@ -2094,6 +2092,12 @@
bool OatWriter::WriteOatDexFiles(OutputStream* rodata) {
TimingLogger::ScopedTiming split("WriteOatDexFiles", timings_);
+ off_t initial_offset = rodata->Seek(0, kSeekCurrent);
+ if (initial_offset == static_cast<off_t>(-1)) {
+ LOG(ERROR) << "Failed to get current position in " << rodata->GetLocation();
+ return false;
+ }
+
// Seek to the start of OatDexFiles, i.e. to the end of the OatHeader. If there are
// no OatDexFiles, no data is actually written to .rodata before WriteHeader() and
// this Seek() ensures that we reserve the space for OatHeader in .rodata.
@@ -2119,30 +2123,13 @@
}
}
- return true;
-}
+ // Seek back to the initial position.
+ if (rodata->Seek(initial_offset, kSeekSet) != initial_offset) {
+ PLOG(ERROR) << "Failed to seek to initial position. Actual: " << actual_offset
+ << " Expected: " << initial_offset << " File: " << rodata->GetLocation();
+ return false;
+ }
-bool OatWriter::ExtendForTypeLookupTables(OutputStream* rodata, File* file, size_t offset) {
- TimingLogger::ScopedTiming split("ExtendForTypeLookupTables", timings_);
-
- int64_t new_length = oat_data_offset_ + dchecked_integral_cast<int64_t>(offset);
- if (file->SetLength(new_length) != 0) {
- PLOG(ERROR) << "Failed to extend file for type lookup tables. new_length: " << new_length
- << "File: " << file->GetPath();
- return false;
- }
- off_t actual_offset = rodata->Seek(new_length, kSeekSet);
- if (actual_offset != static_cast<off_t>(new_length)) {
- PLOG(ERROR) << "Failed to seek stream after extending file for type lookup tables."
- << " Actual: " << actual_offset << " Expected: " << new_length
- << " File: " << rodata->GetLocation();
- return false;
- }
- if (!rodata->Flush()) {
- PLOG(ERROR) << "Failed to flush stream after extending for type lookup tables."
- << " File: " << rodata->GetLocation();
- return false;
- }
return true;
}
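
WriteOatDexFiles() now bookmarks the current .rodata position, rewrites the OatDexFile headers near the start of the section, and then seeks back so later writes continue where they left off. The same bookmark-and-restore pattern over a plain C++ stream (generic sketch, not ART's OutputStream API):

#include <cstddef>
#include <fstream>
#include <iostream>

// Rewrites a fixed-size header at a known offset, then restores the write
// position so subsequent appends continue from the bookmark.
bool PatchHeaderInPlace(std::fstream& out, std::streamoff header_offset,
                        const char* header, std::size_t header_size) {
  const std::streampos initial = out.tellp();  // remember where we are
  if (initial == std::streampos(-1)) {
    std::cerr << "Failed to get current position\n";
    return false;
  }
  out.seekp(header_offset);                                      // jump to the header
  out.write(header, static_cast<std::streamsize>(header_size));  // overwrite it in place
  out.seekp(initial);                                            // seek back to the bookmark
  return out.good();
}
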
@@ -2223,26 +2210,66 @@
}
bool OatWriter::WriteTypeLookupTables(
- MemMap* opened_dex_files_map,
+ OutputStream* rodata,
const std::vector<std::unique_ptr<const DexFile>>& opened_dex_files) {
TimingLogger::ScopedTiming split("WriteTypeLookupTables", timings_);
DCHECK_EQ(opened_dex_files.size(), oat_dex_files_.size());
for (size_t i = 0, size = opened_dex_files.size(); i != size; ++i) {
OatDexFile* oat_dex_file = &oat_dex_files_[i];
- if (oat_dex_file->lookup_table_offset_ != 0u) {
- DCHECK(oat_dex_file->create_type_lookup_table_ == CreateTypeLookupTable::kCreate);
- DCHECK_NE(oat_dex_file->class_offsets_.size(), 0u);
- size_t map_offset = oat_dex_files_[0].dex_file_offset_;
- size_t lookup_table_offset = oat_dex_file->lookup_table_offset_;
- uint8_t* lookup_table = opened_dex_files_map->Begin() + (lookup_table_offset - map_offset);
- opened_dex_files[i]->CreateTypeLookupTable(lookup_table);
+ DCHECK_EQ(oat_dex_file->lookup_table_offset_, 0u);
+
+ if (oat_dex_file->create_type_lookup_table_ != CreateTypeLookupTable::kCreate ||
+ oat_dex_file->class_offsets_.empty()) {
+ continue;
}
+
+ size_t table_size = TypeLookupTable::RawDataLength(oat_dex_file->class_offsets_.size());
+ if (table_size == 0u) {
+ continue;
+ }
+
+ // Create the lookup table. When `nullptr` is given as the storage buffer,
+ // TypeLookupTable allocates its own and DexFile takes ownership.
+ opened_dex_files[i]->CreateTypeLookupTable(/* storage */ nullptr);
+ TypeLookupTable* table = opened_dex_files[i]->GetTypeLookupTable();
+
+ // Type tables are required to be 4 byte aligned.
+ size_t original_offset = size_;
+ size_t rodata_offset = RoundUp(original_offset, 4);
+ size_t padding_size = rodata_offset - original_offset;
+
+ if (padding_size != 0u) {
+ std::vector<uint8_t> buffer(padding_size, 0u);
+ if (!rodata->WriteFully(buffer.data(), padding_size)) {
+ PLOG(ERROR) << "Failed to write lookup table alignment padding."
+ << " File: " << oat_dex_file->GetLocation()
+ << " Output: " << rodata->GetLocation();
+ return false;
+ }
+ }
+
+ DCHECK_EQ(oat_data_offset_ + rodata_offset,
+ static_cast<size_t>(rodata->Seek(0u, kSeekCurrent)));
+ DCHECK_EQ(table_size, table->RawDataLength());
+
+ if (!rodata->WriteFully(table->RawData(), table_size)) {
+ PLOG(ERROR) << "Failed to write lookup table."
+ << " File: " << oat_dex_file->GetLocation()
+ << " Output: " << rodata->GetLocation();
+ return false;
+ }
+
+ oat_dex_file->lookup_table_offset_ = rodata_offset;
+
+ size_ += padding_size + table_size;
+ size_oat_lookup_table_ += table_size;
+ size_oat_lookup_table_alignment_ += padding_size;
}
- DCHECK_EQ(opened_dex_files_map == nullptr, opened_dex_files.empty());
- if (opened_dex_files_map != nullptr && !opened_dex_files_map->Sync()) {
- PLOG(ERROR) << "Failed to Sync() type lookup tables. Map: " << opened_dex_files_map->GetName();
+ if (!rodata->Flush()) {
+ PLOG(ERROR) << "Failed to flush stream after writing type lookup tables."
+ << " File: " << rodata->GetLocation();
return false;
}
@@ -2298,22 +2325,6 @@
+ sizeof(lookup_table_offset_);
}
-void OatWriter::OatDexFile::ReserveTypeLookupTable(OatWriter* oat_writer) {
- DCHECK_EQ(lookup_table_offset_, 0u);
- if (create_type_lookup_table_ == CreateTypeLookupTable::kCreate && !class_offsets_.empty()) {
- size_t table_size = TypeLookupTable::RawDataLength(class_offsets_.size());
- if (table_size != 0u) {
- // Type tables are required to be 4 byte aligned.
- size_t original_offset = oat_writer->size_;
- size_t offset = RoundUp(original_offset, 4);
- oat_writer->size_oat_lookup_table_alignment_ += offset - original_offset;
- lookup_table_offset_ = offset;
- oat_writer->size_ = offset + table_size;
- oat_writer->size_oat_lookup_table_ += table_size;
- }
- }
-}
-
void OatWriter::OatDexFile::ReserveClassOffsets(OatWriter* oat_writer) {
DCHECK_EQ(class_offsets_offset_, 0u);
if (!class_offsets_.empty()) {
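
Instead of reserving space up front and patching the tables through the dex-file MemMap, the lookup tables are now streamed straight into .rodata with explicit 4-byte alignment padding and the running size_ bumped as they go. The alignment arithmetic in isolation (generic sketch; RoundUp here is a local helper, not ART's utility):

#include <cstddef>
#include <cstdint>
#include <vector>

// Round offset up to the next multiple of a power-of-two alignment.
inline std::size_t RoundUp(std::size_t offset, std::size_t alignment) {
  return (offset + alignment - 1u) & ~(alignment - 1u);
}

// Appends zero padding so the blob starts 4-byte aligned and returns the
// offset at which the blob lands, mirroring lookup_table_offset_ above.
std::size_t AppendAligned(std::vector<uint8_t>* rodata, const std::vector<uint8_t>& blob) {
  const std::size_t original_offset = rodata->size();
  const std::size_t aligned_offset = RoundUp(original_offset, 4u);
  rodata->resize(aligned_offset, 0u);                        // alignment padding
  rodata->insert(rodata->end(), blob.begin(), blob.end());   // the table itself
  return aligned_offset;
}
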
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index decb7db..93e2e44 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -262,12 +262,11 @@
bool WriteDexFile(OutputStream* rodata, File* file, OatDexFile* oat_dex_file, File* dex_file);
bool WriteDexFile(OutputStream* rodata, OatDexFile* oat_dex_file, const uint8_t* dex_file);
bool WriteOatDexFiles(OutputStream* rodata);
- bool ExtendForTypeLookupTables(OutputStream* rodata, File* file, size_t offset);
bool OpenDexFiles(File* file,
bool verify,
/*out*/ std::unique_ptr<MemMap>* opened_dex_files_map,
/*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files);
- bool WriteTypeLookupTables(MemMap* opened_dex_files_map,
+ bool WriteTypeLookupTables(OutputStream* rodata,
const std::vector<std::unique_ptr<const DexFile>>& opened_dex_files);
bool WriteCodeAlignment(OutputStream* out, uint32_t aligned_code_delta);
void SetMultiOatRelativePatcherAdjustment();
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 0d3f849..2087888 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -283,8 +283,7 @@
}
void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
- size_t maximum_number_of_live_core_registers,
- size_t maximum_number_of_live_fpu_registers,
+ size_t maximum_safepoint_spill_size,
size_t number_of_out_slots,
const ArenaVector<HBasicBlock*>& block_order) {
block_order_ = &block_order;
@@ -298,14 +297,12 @@
&& !HasAllocatedCalleeSaveRegisters()
&& IsLeafMethod()
&& !RequiresCurrentMethod()) {
- DCHECK_EQ(maximum_number_of_live_core_registers, 0u);
- DCHECK_EQ(maximum_number_of_live_fpu_registers, 0u);
+ DCHECK_EQ(maximum_safepoint_spill_size, 0u);
SetFrameSize(CallPushesPC() ? GetWordSize() : 0);
} else {
SetFrameSize(RoundUp(
first_register_slot_in_slow_path_
- + maximum_number_of_live_core_registers * GetWordSize()
- + maximum_number_of_live_fpu_registers * GetFloatingPointSpillSlotSize()
+ + maximum_safepoint_spill_size
+ FrameEntrySpillSize(),
kStackAlignment));
}
@@ -753,7 +750,7 @@
}
// Collect PC infos for the mapping table.
- uint32_t native_pc = GetAssembler()->CodeSize();
+ uint32_t native_pc = GetAssembler()->CodePosition();
if (instruction == nullptr) {
// For stack overflow checks and native-debug-info entries without dex register
@@ -765,21 +762,16 @@
LocationSummary* locations = instruction->GetLocations();
uint32_t register_mask = locations->GetRegisterMask();
- if (instruction->IsSuspendCheck()) {
- // Suspend check has special ABI that saves the caller-save registers in callee,
- // so we want to emit stack maps containing the registers.
- // TODO: Register allocator still reserves space for the caller-save registers.
- // We should add slow-path-specific caller-save information into LocationSummary
- // and refactor the code here as well as in the register allocator to use it.
+ DCHECK_EQ(register_mask & ~locations->GetLiveRegisters()->GetCoreRegisters(), 0u);
+ if (locations->OnlyCallsOnSlowPath()) {
+ // In case of slow path, we currently set the location of caller-save registers
+ // to register (instead of their stack location when pushed before the slow-path
+ // call). Therefore register_mask contains both callee-save and caller-save
+ // registers that hold objects. We must remove the spilled caller-save from the
+ // mask, since they will be overwritten by the callee.
+ uint32_t spills = GetSlowPathSpills(locations, /* core_registers */ true);
+ register_mask &= ~spills;
} else {
- if (locations->OnlyCallsOnSlowPath()) {
- // In case of slow path, we currently set the location of caller-save registers
- // to register (instead of their stack location when pushed before the slow-path
- // call). Therefore register_mask contains both callee-save and caller-save
- // registers that hold objects. We must remove the caller-save from the mask, since
- // they will be overwritten by the callee.
- register_mask &= core_callee_save_mask_;
- }
// The register mask must be a subset of callee-save registers.
DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
}
@@ -1236,58 +1228,44 @@
}
void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
- RegisterSet* live_registers = locations->GetLiveRegisters();
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
- for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
- if (!codegen->IsCoreCalleeSaveRegister(i)) {
- if (live_registers->ContainsCoreRegister(i)) {
- // If the register holds an object, update the stack mask.
- if (locations->RegisterContainsObject(i)) {
- locations->SetStackBit(stack_offset / kVRegSize);
- }
- DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
- DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
- saved_core_stack_offsets_[i] = stack_offset;
- stack_offset += codegen->SaveCoreRegister(stack_offset, i);
- }
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+ for (uint32_t i : LowToHighBits(core_spills)) {
+ // If the register holds an object, update the stack mask.
+ if (locations->RegisterContainsObject(i)) {
+ locations->SetStackBit(stack_offset / kVRegSize);
}
+ DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+ DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
+ saved_core_stack_offsets_[i] = stack_offset;
+ stack_offset += codegen->SaveCoreRegister(stack_offset, i);
}
- for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) {
- if (!codegen->IsFloatingPointCalleeSaveRegister(i)) {
- if (live_registers->ContainsFloatingPointRegister(i)) {
- DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
- DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
- saved_fpu_stack_offsets_[i] = stack_offset;
- stack_offset += codegen->SaveFloatingPointRegister(stack_offset, i);
- }
- }
+ const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ for (size_t i : LowToHighBits(fp_spills)) {
+ DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+ DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
+ saved_fpu_stack_offsets_[i] = stack_offset;
+ stack_offset += codegen->SaveFloatingPointRegister(stack_offset, i);
}
}
void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
- RegisterSet* live_registers = locations->GetLiveRegisters();
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
- for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
- if (!codegen->IsCoreCalleeSaveRegister(i)) {
- if (live_registers->ContainsCoreRegister(i)) {
- DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
- DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
- stack_offset += codegen->RestoreCoreRegister(stack_offset, i);
- }
- }
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+ for (uint32_t i : LowToHighBits(core_spills)) {
+ DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+ DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
+ stack_offset += codegen->RestoreCoreRegister(stack_offset, i);
}
- for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) {
- if (!codegen->IsFloatingPointCalleeSaveRegister(i)) {
- if (live_registers->ContainsFloatingPointRegister(i)) {
- DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
- DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
- stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, i);
- }
- }
+ const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ for (size_t i : LowToHighBits(fp_spills)) {
+ DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+ DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
+ stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, i);
}
}
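
SaveLiveRegisters() and RestoreLiveRegisters() now take a precomputed spill mask and walk only its set bits from lowest to highest, instead of scanning every register and re-testing callee-save membership. A sketch of that iteration with a count-trailing-zeros loop (LowToHighBits itself is an ART utility; this mirrors its behaviour under that assumption):

#include <cstdint>
#include <cstdio>

// Visit the set bits of mask from the lowest to the highest bit index.
template <typename Visitor>
void ForEachLowToHighBit(uint32_t mask, Visitor visit) {
  while (mask != 0u) {
    const uint32_t bit = static_cast<uint32_t>(__builtin_ctz(mask));  // GCC/Clang builtin
    visit(bit);
    mask &= mask - 1u;  // clear the lowest set bit and continue
  }
}

int main() {
  const uint32_t core_spills = 0x96u;  // bits 1, 2, 4, 7 set
  ForEachLowToHighBit(core_spills, [](uint32_t reg) {
    std::printf("spill core register r%u\n", reg);
  });
  return 0;
}
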
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index fd396c4..0c60a98 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -22,9 +22,8 @@
#include "base/arena_containers.h"
#include "base/arena_object.h"
#include "base/bit_field.h"
+#include "base/bit_utils.h"
#include "base/enums.h"
-#include "compiled_method.h"
-#include "driver/compiler_options.h"
#include "globals.h"
#include "graph_visualizer.h"
#include "locations.h"
@@ -54,6 +53,7 @@
class Assembler;
class CodeGenerator;
class CompilerDriver;
+class CompilerOptions;
class LinkerPatch;
class ParallelMoveResolver;
@@ -212,8 +212,7 @@
virtual size_t GetFloatingPointSpillSlotSize() const = 0;
virtual uintptr_t GetAddressOf(HBasicBlock* block) = 0;
void InitializeCodeGeneration(size_t number_of_spill_slots,
- size_t maximum_number_of_live_core_registers,
- size_t maximum_number_of_live_fpu_registers,
+ size_t maximum_safepoint_spill_size,
size_t number_of_out_slots,
const ArenaVector<HBasicBlock*>& block_order);
// Backends can override this as necessary. For most, no special alignment is required.
@@ -279,6 +278,30 @@
return (fpu_callee_save_mask_ & (1 << reg)) != 0;
}
+ uint32_t GetSlowPathSpills(LocationSummary* locations, bool core_registers) const {
+ DCHECK(locations->OnlyCallsOnSlowPath() ||
+ (locations->Intrinsified() && locations->CallsOnMainAndSlowPath() &&
+ !locations->HasCustomSlowPathCallingConvention()));
+ uint32_t live_registers = core_registers
+ ? locations->GetLiveRegisters()->GetCoreRegisters()
+ : locations->GetLiveRegisters()->GetFloatingPointRegisters();
+ if (locations->HasCustomSlowPathCallingConvention()) {
+ // Save only the live registers that the custom calling convention wants us to save.
+ uint32_t caller_saves = core_registers
+ ? locations->GetCustomSlowPathCallerSaves().GetCoreRegisters()
+ : locations->GetCustomSlowPathCallerSaves().GetFloatingPointRegisters();
+ return live_registers & caller_saves;
+ } else {
+ // Default ABI, we need to spill non-callee-save live registers.
+ uint32_t callee_saves = core_registers ? core_callee_save_mask_ : fpu_callee_save_mask_;
+ return live_registers & ~callee_saves;
+ }
+ }
+
+ size_t GetNumberOfSlowPathSpills(LocationSummary* locations, bool core_registers) const {
+ return POPCOUNT(GetSlowPathSpills(locations, core_registers));
+ }
+
// Record native to dex mapping for a suspend point. Required by runtime.
void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path = nullptr);
// Check whether we have already recorded mapping at this PC.
@@ -601,7 +624,7 @@
return POPCOUNT(core_spill_mask_) * GetWordSize();
}
- bool HasAllocatedCalleeSaveRegisters() const {
+ virtual bool HasAllocatedCalleeSaveRegisters() const {
// We check the core registers against 1 because it always comprises the return PC.
return (POPCOUNT(allocated_registers_.GetCoreRegisters() & core_callee_save_mask_) != 1)
|| (POPCOUNT(allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_) != 0);
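
GetSlowPathSpills() narrows the live-register mask in one of two ways: under a custom slow-path calling convention only the caller-saves that convention names get spilled, otherwise everything live that is not callee-save does. The same selection over plain bit masks (hypothetical free function, for illustration only):

#include <cstdint>

// Registers that must be spilled around a slow-path call.
//   live          - registers holding live values at the safepoint.
//   callee_saves  - registers the callee is obliged to preserve.
//   custom_saves  - caller-saves a custom convention asks us to preserve.
inline uint32_t SlowPathSpills(uint32_t live, uint32_t callee_saves,
                               uint32_t custom_saves, bool has_custom_convention) {
  if (has_custom_convention) {
    // Save only what the custom convention cares about; an empty RegisterSet
    // means the runtime entrypoint preserves everything itself.
    return live & custom_saves;
  }
  // Default ABI: anything live that the callee will not preserve for us.
  return live & ~callee_saves;
}
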
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index d8866a9..3c4a3e8 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -247,35 +247,6 @@
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM);
};
-class LoadStringSlowPathARM : public SlowPathCode {
- public:
- explicit LoadStringSlowPathARM(HLoadString* instruction) : SlowPathCode(instruction) {}
-
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = instruction_->GetLocations();
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
-
- CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
- __ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, locations);
-
- InvokeRuntimeCallingConvention calling_convention;
- const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
- __ LoadImmediate(calling_convention.GetRegisterAt(0), string_index);
- arm_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
- CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
- arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
-
- RestoreLiveRegisters(codegen, locations);
- __ b(GetExitLabel());
- }
-
- const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM"; }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM);
-};
-
class TypeCheckSlowPathARM : public SlowPathCode {
public:
TypeCheckSlowPathARM(HInstruction* instruction, bool is_fatal)
@@ -344,7 +315,6 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, instruction_->GetLocations());
arm_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickDeoptimize, void, void>();
}
@@ -1209,7 +1179,9 @@
SlowPathCode* slow_path) {
ValidateInvokeRuntime(instruction, slow_path);
GenerateInvokeRuntime(GetThreadOffset<kArmPointerSize>(entrypoint).Int32Value());
- RecordPcInfo(instruction, dex_pc, slow_path);
+ if (EntrypointRequiresStackMap(entrypoint)) {
+ RecordPcInfo(instruction, dex_pc, slow_path);
+ }
}
void CodeGeneratorARM::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
@@ -1530,6 +1502,7 @@
void LocationsBuilderARM::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -3957,6 +3930,9 @@
object_field_get_with_read_barrier ?
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
+ if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
locations->SetInAt(0, Location::RequiresRegister());
bool volatile_for_double = field_info.IsVolatile()
@@ -4433,6 +4409,9 @@
object_array_get_with_read_barrier ?
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
+ if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
if (Primitive::IsFloatingPointType(instruction->GetType())) {
@@ -4619,7 +4598,6 @@
}
if (needs_write_barrier) {
// Temporary registers for the write barrier.
- // These registers may be used for Baker read barriers too.
locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too.
locations->AddTemp(Location::RequiresRegister());
}
@@ -4737,127 +4715,42 @@
__ Bind(&non_zero);
}
- if (kEmitCompilerReadBarrier) {
- if (!kUseBakerReadBarrier) {
- // When (non-Baker) read barriers are enabled, the type
- // checking instrumentation requires two read barriers
- // generated by CodeGeneratorARM::GenerateReadBarrierSlow:
- //
- // __ Mov(temp2, temp1);
- // // /* HeapReference<Class> */ temp1 = temp1->component_type_
- // __ LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
- // codegen_->GenerateReadBarrierSlow(
- // instruction, temp1_loc, temp1_loc, temp2_loc, component_offset);
- //
- // // /* HeapReference<Class> */ temp2 = value->klass_
- // __ LoadFromOffset(kLoadWord, temp2, value, class_offset);
- // codegen_->GenerateReadBarrierSlow(
- // instruction, temp2_loc, temp2_loc, value_loc, class_offset, temp1_loc);
- //
- // __ cmp(temp1, ShifterOperand(temp2));
- //
- // However, the second read barrier may trash `temp`, as it
- // is a temporary register, and as such would not be saved
- // along with live registers before calling the runtime (nor
- // restored afterwards). So in this case, we bail out and
- // delegate the work to the array set slow path.
- //
- // TODO: Extend the register allocator to support a new
- // "(locally) live temp" location so as to avoid always
- // going into the slow path when read barriers are enabled?
- //
- // There is no such problem with Baker read barriers (see below).
- __ b(slow_path->GetEntryLabel());
- } else {
- Register temp3 = IP;
- Location temp3_loc = Location::RegisterLocation(temp3);
+ // Note that when read barriers are enabled, the type checks
+ // are performed without read barriers. This is fine, even in
+ // the case where a class object is in the from-space after
+ // the flip, as a comparison involving such a type would not
+ // produce a false positive; it may of course produce a false
+ // negative, in which case we would take the ArraySet slow
+ // path.
- // Note: `temp3` (scratch register IP) cannot be used as
- // `ref` argument of GenerateFieldLoadWithBakerReadBarrier
- // calls below (see ReadBarrierMarkSlowPathARM for more
- // details).
+ // /* HeapReference<Class> */ temp1 = array->klass_
+ __ LoadFromOffset(kLoadWord, temp1, array, class_offset);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ MaybeUnpoisonHeapReference(temp1);
- // /* HeapReference<Class> */ temp1 = array->klass_
- codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
- temp1_loc,
- array,
- class_offset,
- temp3_loc,
- /* needs_null_check */ true);
+ // /* HeapReference<Class> */ temp1 = temp1->component_type_
+ __ LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
+ // /* HeapReference<Class> */ temp2 = value->klass_
+ __ LoadFromOffset(kLoadWord, temp2, value, class_offset);
+ // If heap poisoning is enabled, no need to unpoison `temp1`
+ // nor `temp2`, as we are comparing two poisoned references.
+ __ cmp(temp1, ShifterOperand(temp2));
- // /* HeapReference<Class> */ temp1 = temp1->component_type_
- codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
- temp1_loc,
- temp1,
- component_offset,
- temp3_loc,
- /* needs_null_check */ false);
- // Register `temp1` is not trashed by the read barrier
- // emitted by GenerateFieldLoadWithBakerReadBarrier below,
- // as that method produces a call to a ReadBarrierMarkRegX
- // entry point, which saves all potentially live registers,
- // including temporaries such a `temp1`.
- // /* HeapReference<Class> */ temp2 = value->klass_
- codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
- temp2_loc,
- value,
- class_offset,
- temp3_loc,
- /* needs_null_check */ false);
- // If heap poisoning is enabled, `temp1` and `temp2` have
- // been unpoisoned by the the previous calls to
- // CodeGeneratorARM::GenerateFieldLoadWithBakerReadBarrier.
- __ cmp(temp1, ShifterOperand(temp2));
-
- if (instruction->StaticTypeOfArrayIsObjectArray()) {
- Label do_put;
- __ b(&do_put, EQ);
- // We do not need to emit a read barrier for the
- // following heap reference load, as `temp1` is only used
- // in a comparison with null below, and this reference
- // is not kept afterwards.
- // /* HeapReference<Class> */ temp1 = temp1->super_class_
- __ LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
- // If heap poisoning is enabled, no need to unpoison
- // `temp`, as we are comparing against null below.
- __ CompareAndBranchIfNonZero(temp1, slow_path->GetEntryLabel());
- __ Bind(&do_put);
- } else {
- __ b(slow_path->GetEntryLabel(), NE);
- }
- }
- } else {
- // Non read barrier code.
-
- // /* HeapReference<Class> */ temp1 = array->klass_
- __ LoadFromOffset(kLoadWord, temp1, array, class_offset);
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ if (instruction->StaticTypeOfArrayIsObjectArray()) {
+ Label do_put;
+ __ b(&do_put, EQ);
+ // If heap poisoning is enabled, the `temp1` reference has
+ // not been unpoisoned yet; unpoison it now.
__ MaybeUnpoisonHeapReference(temp1);
- // /* HeapReference<Class> */ temp1 = temp1->component_type_
- __ LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
- // /* HeapReference<Class> */ temp2 = value->klass_
- __ LoadFromOffset(kLoadWord, temp2, value, class_offset);
- // If heap poisoning is enabled, no need to unpoison `temp1`
- // nor `temp2`, as we are comparing two poisoned references.
- __ cmp(temp1, ShifterOperand(temp2));
-
- if (instruction->StaticTypeOfArrayIsObjectArray()) {
- Label do_put;
- __ b(&do_put, EQ);
- // If heap poisoning is enabled, the `temp1` reference has
- // not been unpoisoned yet; unpoison it now.
- __ MaybeUnpoisonHeapReference(temp1);
-
- // /* HeapReference<Class> */ temp1 = temp1->super_class_
- __ LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
- // If heap poisoning is enabled, no need to unpoison
- // `temp1`, as we are comparing against null below.
- __ CompareAndBranchIfNonZero(temp1, slow_path->GetEntryLabel());
- __ Bind(&do_put);
- } else {
- __ b(slow_path->GetEntryLabel(), NE);
- }
+ // /* HeapReference<Class> */ temp1 = temp1->super_class_
+ __ LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
+ // If heap poisoning is enabled, no need to unpoison
+ // `temp1`, as we are comparing against null below.
+ __ CompareAndBranchIfNonZero(temp1, slow_path->GetEntryLabel());
+ __ Bind(&do_put);
+ } else {
+ __ b(slow_path->GetEntryLabel(), NE);
}
}
@@ -5052,7 +4945,9 @@
}
void LocationsBuilderARM::VisitSuspendCheck(HSuspendCheck* instruction) {
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
}
void InstructionCodeGeneratorARM::VisitSuspendCheck(HSuspendCheck* instruction) {
@@ -5327,17 +5222,6 @@
HLoadClass::LoadKind CodeGeneratorARM::GetSupportedLoadClassKind(
HLoadClass::LoadKind desired_class_load_kind) {
- if (kEmitCompilerReadBarrier) {
- switch (desired_class_load_kind) {
- case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
- case HLoadClass::LoadKind::kBootImageAddress:
- // TODO: Implement for read barrier.
- return HLoadClass::LoadKind::kDexCacheViaMethod;
- default:
- break;
- }
- }
switch (desired_class_load_kind) {
case HLoadClass::LoadKind::kReferrersClass:
break;
@@ -5379,10 +5263,15 @@
return;
}
- LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || kEmitCompilerReadBarrier)
+ const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
+ LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
+
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
load_kind == HLoadClass::LoadKind::kDexCacheViaMethod ||
@@ -5404,6 +5293,7 @@
Location out_loc = locations->Out();
Register out = out_loc.AsRegister<Register>();
+ const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
bool generate_null_check = false;
switch (cls->GetLoadKind()) {
case HLoadClass::LoadKind::kReferrersClass: {
@@ -5411,18 +5301,21 @@
DCHECK(!cls->MustGenerateClinitCheck());
// /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
Register current_method = locations->InAt(0).AsRegister<Register>();
- GenerateGcRootFieldLoad(
- cls, out_loc, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
+ GenerateGcRootFieldLoad(cls,
+ out_loc,
+ current_method,
+ ArtMethod::DeclaringClassOffset().Int32Value(),
+ requires_read_barrier);
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimeAddress: {
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(!requires_read_barrier);
__ LoadLiteral(out, codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
cls->GetTypeIndex()));
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(!requires_read_barrier);
CodeGeneratorARM::PcRelativePatchInfo* labels =
codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
__ BindTrackedLabel(&labels->movw_label);
@@ -5434,7 +5327,7 @@
break;
}
case HLoadClass::LoadKind::kBootImageAddress: {
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(!requires_read_barrier);
DCHECK_NE(cls->GetAddress(), 0u);
uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
__ LoadLiteral(out, codegen_->DeduplicateBootImageAddressLiteral(address));
@@ -5454,7 +5347,7 @@
uint32_t offset = address & MaxInt<uint32_t>(offset_bits);
__ LoadLiteral(out, codegen_->DeduplicateDexCacheAddressLiteral(base_address));
// /* GcRoot<mirror::Class> */ out = *(base_address + offset)
- GenerateGcRootFieldLoad(cls, out_loc, out, offset);
+ GenerateGcRootFieldLoad(cls, out_loc, out, offset, requires_read_barrier);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -5463,7 +5356,7 @@
HArmDexCacheArraysBase* base = cls->InputAt(0)->AsArmDexCacheArraysBase();
int32_t offset = cls->GetDexCacheElementOffset() - base->GetElementOffset();
// /* GcRoot<mirror::Class> */ out = *(dex_cache_arrays_base + offset)
- GenerateGcRootFieldLoad(cls, out_loc, base_reg, offset);
+ GenerateGcRootFieldLoad(cls, out_loc, base_reg, offset, requires_read_barrier);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -5477,7 +5370,7 @@
ArtMethod::DexCacheResolvedTypesOffset(kArmPointerSize).Int32Value());
// /* GcRoot<mirror::Class> */ out = out[type_index]
size_t offset = CodeGenerator::GetCacheOffset(cls->GetTypeIndex());
- GenerateGcRootFieldLoad(cls, out_loc, out, offset);
+ GenerateGcRootFieldLoad(cls, out_loc, out, offset, requires_read_barrier);
generate_null_check = !cls->IsInDexCache();
}
}
@@ -5569,24 +5462,28 @@
}
void LocationsBuilderARM::VisitLoadString(HLoadString* load) {
- LocationSummary::CallKind call_kind = (load->NeedsEnvironment() || kEmitCompilerReadBarrier)
- ? LocationSummary::kCallOnSlowPath
+ LocationSummary::CallKind call_kind = load->NeedsEnvironment()
+ ? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
+
HLoadString::LoadKind load_kind = load->GetLoadKind();
- if (load_kind == HLoadString::LoadKind::kDexCacheViaMethod ||
- load_kind == HLoadString::LoadKind::kDexCachePcRelative) {
+ DCHECK(load_kind != HLoadString::LoadKind::kDexCachePcRelative) << "Not supported";
+ if (load_kind == HLoadString::LoadKind::kDexCacheViaMethod) {
locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RegisterLocation(R0));
+ } else {
+ locations->SetOut(Location::RequiresRegister());
}
- locations->SetOut(Location::RequiresRegister());
}
void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
LocationSummary* locations = load->GetLocations();
Location out_loc = locations->Out();
Register out = out_loc.AsRegister<Register>();
+ HLoadString::LoadKind load_kind = load->GetLoadKind();
- switch (load->GetLoadKind()) {
+ switch (load_kind) {
case HLoadString::LoadKind::kBootImageLinkTimeAddress: {
DCHECK(!kEmitCompilerReadBarrier);
__ LoadLiteral(out, codegen_->DeduplicateBootImageStringLiteral(load->GetDexFile(),
@@ -5616,11 +5513,12 @@
break;
}
- // TODO: Re-add the compiler code to do string dex cache lookup again.
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
- codegen_->AddSlowPath(slow_path);
- __ b(slow_path->GetEntryLabel());
- __ Bind(slow_path->GetExitLabel());
+ // TODO: Consider re-adding the compiler code to do string dex cache lookup again.
+ DCHECK(load_kind == HLoadString::LoadKind::kDexCacheViaMethod);
+ InvokeRuntimeCallingConvention calling_convention;
+ __ LoadImmediate(calling_convention.GetRegisterAt(0), load->GetStringIndex());
+ codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
+ CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
}
static int32_t GetExceptionTlsOffset() {
@@ -5670,6 +5568,7 @@
void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
+ bool baker_read_barrier_slow_path = false;
switch (type_check_kind) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
@@ -5677,6 +5576,7 @@
case TypeCheckKind::kArrayObjectCheck:
call_kind =
kEmitCompilerReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
+ baker_read_barrier_slow_path = kUseBakerReadBarrier;
break;
case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
@@ -5686,6 +5586,9 @@
}
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ if (baker_read_barrier_slow_path) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
// The "out" register is used as a temporary, so it overlaps with the inputs.
@@ -5856,6 +5759,7 @@
bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
+ bool baker_read_barrier_slow_path = false;
switch (type_check_kind) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
@@ -5864,6 +5768,7 @@
call_kind = (throws_into_catch || kEmitCompilerReadBarrier) ?
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
+ baker_read_barrier_slow_path = kUseBakerReadBarrier && !throws_into_catch;
break;
case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
@@ -5873,6 +5778,9 @@
}
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ if (baker_read_barrier_slow_path) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
// Note that TypeCheckSlowPathARM uses this "temp" register too.
@@ -6375,9 +6283,11 @@
void InstructionCodeGeneratorARM::GenerateGcRootFieldLoad(HInstruction* instruction,
Location root,
Register obj,
- uint32_t offset) {
+ uint32_t offset,
+ bool requires_read_barrier) {
Register root_reg = root.AsRegister<Register>();
- if (kEmitCompilerReadBarrier) {
+ if (requires_read_barrier) {
+ DCHECK(kEmitCompilerReadBarrier);
if (kUseBakerReadBarrier) {
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
// Baker's read barrier are used:
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index a07dd6b..ce9d7e6 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -63,9 +63,9 @@
DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};
-static constexpr DRegister FromLowSToD(SRegister reg) {
- return DCHECK_CONSTEXPR(reg % 2 == 0, , D0)
- static_cast<DRegister>(reg / 2);
+constexpr DRegister FromLowSToD(SRegister reg) {
+ DCHECK_EQ(reg % 2, 0);
+ return static_cast<DRegister>(reg / 2);
}
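
FromLowSToD can shed the DCHECK_CONSTEXPR contortion because C++14 relaxed constexpr allows ordinary statements before the return. A stand-alone illustration with a hypothetical helper, using assert rather than ART's DCHECK:

#include <cassert>

// C++11 constexpr allowed only a single return expression, so checks had to be
// folded in via conditional-expression tricks like DCHECK_CONSTEXPR. Under
// C++14 a statement may precede the return; the assert just must not fire
// during constant evaluation.
constexpr int LowHalfIndex(int reg) {
  assert(reg % 2 == 0);  // debug-only check, compiled out in NDEBUG builds
  return reg / 2;
}

static_assert(LowHalfIndex(6) == 3, "usable in constant expressions");
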
@@ -271,11 +271,12 @@
//
// root <- *(obj + offset)
//
- // while honoring read barriers (if any).
+ // while honoring read barriers if requires_read_barrier is true.
void GenerateGcRootFieldLoad(HInstruction* instruction,
Location root,
Register obj,
- uint32_t offset);
+ uint32_t offset,
+ bool requires_read_barrier = kEmitCompilerReadBarrier);
void GenerateTestAndBranch(HInstruction* instruction,
size_t condition_input_index,
Label* true_target,
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 17fc13c..1d2f334 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -336,36 +336,6 @@
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM64);
};
-class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
- public:
- explicit LoadStringSlowPathARM64(HLoadString* instruction) : SlowPathCodeARM64(instruction) {}
-
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = instruction_->GetLocations();
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
- CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
-
- __ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, locations);
-
- InvokeRuntimeCallingConvention calling_convention;
- const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
- __ Mov(calling_convention.GetRegisterAt(0).W(), string_index);
- arm64_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
- CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
- Primitive::Type type = instruction_->GetType();
- arm64_codegen->MoveLocation(locations->Out(), calling_convention.GetReturnLocation(type), type);
-
- RestoreLiveRegisters(codegen, locations);
- __ B(GetExitLabel());
- }
-
- const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM64"; }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
-};
-
class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
public:
explicit NullCheckSlowPathARM64(HNullCheck* instr) : SlowPathCodeARM64(instr) {}
@@ -494,7 +464,6 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, instruction_->GetLocations());
arm64_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickDeoptimize, void, void>();
}
@@ -1485,7 +1454,9 @@
SlowPathCode* slow_path) {
ValidateInvokeRuntime(instruction, slow_path);
GenerateInvokeRuntime(GetThreadOffset<kArm64PointerSize>(entrypoint).Int32Value());
- RecordPcInfo(instruction, dex_pc, slow_path);
+ if (EntrypointRequiresStackMap(entrypoint)) {
+ RecordPcInfo(instruction, dex_pc, slow_path);
+ }
}
void CodeGeneratorARM64::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
@@ -1636,6 +1607,9 @@
object_field_get_with_read_barrier ?
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
+ if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
locations->SetInAt(0, Location::RequiresRegister());
if (Primitive::IsFloatingPointType(instruction->GetType())) {
locations->SetOut(Location::RequiresFpuRegister());
@@ -2061,6 +2035,9 @@
object_array_get_with_read_barrier ?
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
+ if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
if (Primitive::IsFloatingPointType(instruction->GetType())) {
@@ -2171,11 +2148,6 @@
} else {
locations->SetInAt(2, Location::RequiresRegister());
}
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier && (value_type == Primitive::kPrimNot)) {
- // Additional temporary registers for a Baker read barrier.
- locations->AddTemp(Location::RequiresRegister());
- locations->AddTemp(Location::RequiresRegister());
- }
}
void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
@@ -2262,144 +2234,44 @@
__ Bind(&non_zero);
}
- if (kEmitCompilerReadBarrier) {
- if (!kUseBakerReadBarrier) {
- // When (non-Baker) read barriers are enabled, the type
- // checking instrumentation requires two read barriers
- // generated by CodeGeneratorARM64::GenerateReadBarrierSlow:
- //
- // __ Mov(temp2, temp);
- // // /* HeapReference<Class> */ temp = temp->component_type_
- // __ Ldr(temp, HeapOperand(temp, component_offset));
- // codegen_->GenerateReadBarrierSlow(
- // instruction, temp_loc, temp_loc, temp2_loc, component_offset);
- //
- // // /* HeapReference<Class> */ temp2 = value->klass_
- // __ Ldr(temp2, HeapOperand(Register(value), class_offset));
- // codegen_->GenerateReadBarrierSlow(
- // instruction, temp2_loc, temp2_loc, value_loc, class_offset, temp_loc);
- //
- // __ Cmp(temp, temp2);
- //
- // However, the second read barrier may trash `temp`, as it
- // is a temporary register, and as such would not be saved
- // along with live registers before calling the runtime (nor
- // restored afterwards). So in this case, we bail out and
- // delegate the work to the array set slow path.
- //
- // TODO: Extend the register allocator to support a new
- // "(locally) live temp" location so as to avoid always
- // going into the slow path when read barriers are enabled?
- //
- // There is no such problem with Baker read barriers (see below).
- __ B(slow_path->GetEntryLabel());
- } else {
- // Note that we cannot use `temps` (instance of VIXL's
- // UseScratchRegisterScope) to allocate `temp2` because
- // the Baker read barriers generated by
- // GenerateFieldLoadWithBakerReadBarrier below also use
- // that facility to allocate a temporary register, thus
- // making VIXL's scratch register pool empty.
- Location temp2_loc = locations->GetTemp(0);
- Register temp2 = WRegisterFrom(temp2_loc);
+ // Note that when Baker read barriers are enabled, the type
+ // checks are performed without read barriers. This is fine,
+ // even in the case where a class object is in the from-space
+ // after the flip, as a comparison involving such a type would
+ // not produce a false positive; it may of course produce a
+ // false negative, in which case we would take the ArraySet
+ // slow path.
- // Note: Because it is acquired from VIXL's scratch register
- // pool, `temp` might be IP0, and thus cannot be used as
- // `ref` argument of GenerateFieldLoadWithBakerReadBarrier
- // calls below (see ReadBarrierMarkSlowPathARM64 for more
- // details).
+ Register temp2 = temps.AcquireSameSizeAs(array);
+ // /* HeapReference<Class> */ temp = array->klass_
+ __ Ldr(temp, HeapOperand(array, class_offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ GetAssembler()->MaybeUnpoisonHeapReference(temp);
- // /* HeapReference<Class> */ temp2 = array->klass_
- codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
- temp2_loc,
- array,
- class_offset,
- temp,
- /* needs_null_check */ true,
- /* use_load_acquire */ false);
+ // /* HeapReference<Class> */ temp = temp->component_type_
+ __ Ldr(temp, HeapOperand(temp, component_offset));
+ // /* HeapReference<Class> */ temp2 = value->klass_
+ __ Ldr(temp2, HeapOperand(Register(value), class_offset));
+ // If heap poisoning is enabled, no need to unpoison `temp`
+ // nor `temp2`, as we are comparing two poisoned references.
+ __ Cmp(temp, temp2);
+ temps.Release(temp2);
- // /* HeapReference<Class> */ temp2 = temp2->component_type_
- codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
- temp2_loc,
- temp2,
- component_offset,
- temp,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
- // For the same reason that we request `temp2` from the
- // register allocator above, we cannot get `temp3` from
- // VIXL's scratch register pool.
- Location temp3_loc = locations->GetTemp(1);
- Register temp3 = WRegisterFrom(temp3_loc);
- // Register `temp2` is not trashed by the read barrier
- // emitted by GenerateFieldLoadWithBakerReadBarrier below,
- // as that method produces a call to a ReadBarrierMarkRegX
- // entry point, which saves all potentially live registers,
- // including temporaries such as `temp2`.
- // /* HeapReference<Class> */ temp3 = register_value->klass_
- codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
- temp3_loc,
- value.W(),
- class_offset,
- temp,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
- // If heap poisoning is enabled, `temp2` and `temp3` have
- // been unpoisoned by the previous calls to
- // CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier.
- __ Cmp(temp2, temp3);
-
- if (instruction->StaticTypeOfArrayIsObjectArray()) {
- vixl::aarch64::Label do_put;
- __ B(eq, &do_put);
- // We do not need to emit a read barrier for the
- // following heap reference load, as `temp2` is only used
- // in a comparison with null below, and this reference
- // is not kept afterwards.
- // /* HeapReference<Class> */ temp = temp2->super_class_
- __ Ldr(temp, HeapOperand(temp2, super_offset));
- // If heap poisoning is enabled, no need to unpoison
- // `temp`, as we are comparing against null below.
- __ Cbnz(temp, slow_path->GetEntryLabel());
- __ Bind(&do_put);
- } else {
- __ B(ne, slow_path->GetEntryLabel());
- }
- }
- } else {
- // Non read barrier code.
-
- Register temp2 = temps.AcquireSameSizeAs(array);
- // /* HeapReference<Class> */ temp = array->klass_
- __ Ldr(temp, HeapOperand(array, class_offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ if (instruction->StaticTypeOfArrayIsObjectArray()) {
+ vixl::aarch64::Label do_put;
+ __ B(eq, &do_put);
+ // If heap poisoning is enabled, the `temp` reference has
+ // not been unpoisoned yet; unpoison it now.
GetAssembler()->MaybeUnpoisonHeapReference(temp);
- // /* HeapReference<Class> */ temp = temp->component_type_
- __ Ldr(temp, HeapOperand(temp, component_offset));
- // /* HeapReference<Class> */ temp2 = value->klass_
- __ Ldr(temp2, HeapOperand(Register(value), class_offset));
- // If heap poisoning is enabled, no need to unpoison `temp`
- // nor `temp2`, as we are comparing two poisoned references.
- __ Cmp(temp, temp2);
- temps.Release(temp2);
-
- if (instruction->StaticTypeOfArrayIsObjectArray()) {
- vixl::aarch64::Label do_put;
- __ B(eq, &do_put);
- // If heap poisoning is enabled, the `temp` reference has
- // not been unpoisoned yet; unpoison it now.
- GetAssembler()->MaybeUnpoisonHeapReference(temp);
-
- // /* HeapReference<Class> */ temp = temp->super_class_
- __ Ldr(temp, HeapOperand(temp, super_offset));
- // If heap poisoning is enabled, no need to unpoison
- // `temp`, as we are comparing against null below.
- __ Cbnz(temp, slow_path->GetEntryLabel());
- __ Bind(&do_put);
- } else {
- __ B(ne, slow_path->GetEntryLabel());
- }
+ // /* HeapReference<Class> */ temp = temp->super_class_
+ __ Ldr(temp, HeapOperand(temp, super_offset));
+ // If heap poisoning is enabled, no need to unpoison
+ // `temp`, as we are comparing against null below.
+ __ Cbnz(temp, slow_path->GetEntryLabel());
+ __ Bind(&do_put);
+ } else {
+ __ B(ne, slow_path->GetEntryLabel());
}
}
@@ -3052,6 +2924,7 @@
void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -3185,6 +3058,7 @@
void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
+ bool baker_read_barrier_slow_path = false;
switch (type_check_kind) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
@@ -3192,6 +3066,7 @@
case TypeCheckKind::kArrayObjectCheck:
call_kind =
kEmitCompilerReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
+ baker_read_barrier_slow_path = kUseBakerReadBarrier;
break;
case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
@@ -3201,6 +3076,9 @@
}
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ if (baker_read_barrier_slow_path) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
// The "out" register is used as a temporary, so it overlaps with the inputs.
@@ -3372,6 +3250,7 @@
bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
+ bool baker_read_barrier_slow_path = false;
switch (type_check_kind) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
@@ -3380,6 +3259,7 @@
call_kind = (throws_into_catch || kEmitCompilerReadBarrier) ?
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
+ baker_read_barrier_slow_path = kUseBakerReadBarrier && !throws_into_catch;
break;
case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
@@ -3389,6 +3269,9 @@
}
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ if (baker_read_barrier_slow_path) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
// Note that TypeCheckSlowPathARM64 uses this "temp" register too.
@@ -4026,17 +3909,6 @@
HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind(
HLoadClass::LoadKind desired_class_load_kind) {
- if (kEmitCompilerReadBarrier) {
- switch (desired_class_load_kind) {
- case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
- case HLoadClass::LoadKind::kBootImageAddress:
- // TODO: Implement for read barrier.
- return HLoadClass::LoadKind::kDexCacheViaMethod;
- default:
- break;
- }
- }
switch (desired_class_load_kind) {
case HLoadClass::LoadKind::kReferrersClass:
break;
@@ -4071,10 +3943,15 @@
return;
}
- LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || kEmitCompilerReadBarrier)
+ const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
+ LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
+
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
@@ -4094,6 +3971,7 @@
Location out_loc = cls->GetLocations()->Out();
Register out = OutputRegister(cls);
+ const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
bool generate_null_check = false;
switch (cls->GetLoadKind()) {
case HLoadClass::LoadKind::kReferrersClass: {
@@ -4101,17 +3979,21 @@
DCHECK(!cls->MustGenerateClinitCheck());
// /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
Register current_method = InputRegisterAt(cls, 0);
- GenerateGcRootFieldLoad(
- cls, out_loc, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
+ GenerateGcRootFieldLoad(cls,
+ out_loc,
+ current_method,
+ ArtMethod::DeclaringClassOffset().Int32Value(),
+ /*fixup_label*/ nullptr,
+ requires_read_barrier);
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(!requires_read_barrier);
__ Ldr(out, codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
cls->GetTypeIndex()));
break;
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(!requires_read_barrier);
// Add ADRP with its PC-relative type patch.
const DexFile& dex_file = cls->GetDexFile();
uint32_t type_index = cls->GetTypeIndex();
@@ -4132,7 +4014,7 @@
break;
}
case HLoadClass::LoadKind::kBootImageAddress: {
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(!requires_read_barrier);
DCHECK(cls->GetAddress() != 0u && IsUint<32>(cls->GetAddress()));
__ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(cls->GetAddress()));
break;
@@ -4150,7 +4032,12 @@
uint32_t offset = cls->GetAddress() & MaxInt<uint64_t>(offset_bits);
__ Ldr(out.X(), codegen_->DeduplicateDexCacheAddressLiteral(base_address));
// /* GcRoot<mirror::Class> */ out = *(base_address + offset)
- GenerateGcRootFieldLoad(cls, out_loc, out.X(), offset);
+ GenerateGcRootFieldLoad(cls,
+ out_loc,
+ out.X(),
+ offset,
+ /*fixup_label*/ nullptr,
+ requires_read_barrier);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -4169,7 +4056,12 @@
vixl::aarch64::Label* ldr_label =
codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset, adrp_label);
// /* GcRoot<mirror::Class> */ out = *(base_address + offset) /* PC-relative */
- GenerateGcRootFieldLoad(cls, out_loc, out.X(), /* offset placeholder */ 0, ldr_label);
+ GenerateGcRootFieldLoad(cls,
+ out_loc,
+ out.X(),
+ /* offset placeholder */ 0,
+ ldr_label,
+ requires_read_barrier);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -4181,8 +4073,12 @@
Register current_method = InputRegisterAt(cls, 0);
__ Ldr(out.X(), MemOperand(current_method, resolved_types_offset.Int32Value()));
// /* GcRoot<mirror::Class> */ out = out[type_index]
- GenerateGcRootFieldLoad(
- cls, out_loc, out.X(), CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
+ GenerateGcRootFieldLoad(cls,
+ out_loc,
+ out.X(),
+ CodeGenerator::GetCacheOffset(cls->GetTypeIndex()),
+ /*fixup_label*/ nullptr,
+ requires_read_barrier);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -4261,14 +4157,17 @@
}
void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
- LocationSummary::CallKind call_kind = (load->NeedsEnvironment() || kEmitCompilerReadBarrier)
- ? LocationSummary::kCallOnSlowPath
+ LocationSummary::CallKind call_kind = load->NeedsEnvironment()
+ ? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
if (load->GetLoadKind() == HLoadString::LoadKind::kDexCacheViaMethod) {
locations->SetInAt(0, Location::RequiresRegister());
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetOut(calling_convention.GetReturnLocation(load->GetType()));
+ } else {
+ locations->SetOut(Location::RequiresRegister());
}
- locations->SetOut(Location::RequiresRegister());
}
void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
@@ -4312,10 +4211,10 @@
}
// TODO: Re-add the compiler code to do string dex cache lookup again.
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load);
- codegen_->AddSlowPath(slow_path);
- __ B(slow_path->GetEntryLabel());
- __ Bind(slow_path->GetExitLabel());
+ InvokeRuntimeCallingConvention calling_convention;
+ __ Mov(calling_convention.GetRegisterAt(0).W(), load->GetStringIndex());
+ codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
+ CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
}
void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
@@ -4795,7 +4694,9 @@
}
void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
}
void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
@@ -5078,9 +4979,11 @@
Location root,
Register obj,
uint32_t offset,
- vixl::aarch64::Label* fixup_label) {
+ vixl::aarch64::Label* fixup_label,
+ bool requires_read_barrier) {
Register root_reg = RegisterFrom(root, Primitive::kPrimNot);
- if (kEmitCompilerReadBarrier) {
+ if (requires_read_barrier) {
+ DCHECK(kEmitCompilerReadBarrier);
if (kUseBakerReadBarrier) {
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
// Baker's read barrier are used:
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 78db803..f0d7910 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -294,7 +294,8 @@
Location root,
vixl::aarch64::Register obj,
uint32_t offset,
- vixl::aarch64::Label* fixup_label = nullptr);
+ vixl::aarch64::Label* fixup_label = nullptr,
+ bool requires_read_barrier = kEmitCompilerReadBarrier);
// Generate a floating-point comparison.
void GenerateFcmp(HInstruction* instruction);
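
For reference, a minimal sketch of how the new `requires_read_barrier` parameter declared above is meant to be used; the call shape below is illustrative and not one of the actual call sites in this change:

    // Inside an InstructionCodeGeneratorARM64 visitor. A root known to live in
    // the boot image is never moved by the collector, so the read barrier can
    // be skipped explicitly even when kEmitCompilerReadBarrier is true.
    GenerateGcRootFieldLoad(cls,
                            out_loc,
                            obj,        // base register (illustrative)
                            offset,     // field offset (illustrative)
                            /* fixup_label */ nullptr,
                            /* requires_read_barrier */ false);

Leaving the last two arguments at their defaults keeps the old behaviour: no fixup label, and a read barrier whenever kEmitCompilerReadBarrier is set.
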
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 8a2f90d..92e9cd9 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -20,6 +20,7 @@
#include "arch/mips/instruction_set_features_mips.h"
#include "art_method.h"
#include "code_generator_utils.h"
+#include "compiled_method.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
@@ -170,14 +171,10 @@
locations->InAt(1),
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimInt);
- uint32_t entry_point_offset = instruction_->AsBoundsCheck()->IsStringCharAt()
- ? QUICK_ENTRY_POINT(pThrowStringBounds)
- : QUICK_ENTRY_POINT(pThrowArrayBounds);
- mips_codegen->InvokeRuntime(entry_point_offset,
- instruction_,
- instruction_->GetDexPc(),
- this,
- IsDirectEntrypoint(kQuickThrowArrayBounds));
+ QuickEntrypointEnum entrypoint = instruction_->AsBoundsCheck()->IsStringCharAt()
+ ? kQuickThrowStringBounds
+ : kQuickThrowArrayBounds;
+ mips_codegen->InvokeRuntime(entrypoint, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowStringBounds, void, int32_t, int32_t>();
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
@@ -201,11 +198,7 @@
// Live registers will be restored in the catch block if caught.
SaveLiveRegisters(codegen, instruction_->GetLocations());
}
- mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
- instruction_,
- instruction_->GetDexPc(),
- this,
- IsDirectEntrypoint(kQuickThrowDivZero));
+ mips_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
@@ -237,12 +230,9 @@
InvokeRuntimeCallingConvention calling_convention;
__ LoadConst32(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
- int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
- : QUICK_ENTRY_POINT(pInitializeType);
- bool direct = do_clinit_ ? IsDirectEntrypoint(kQuickInitializeStaticStorage)
- : IsDirectEntrypoint(kQuickInitializeType);
-
- mips_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this, direct);
+ QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
+ : kQuickInitializeType;
+ mips_codegen->InvokeRuntime(entrypoint, at_, dex_pc_, this);
if (do_clinit_) {
CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
} else {
@@ -295,11 +285,7 @@
InvokeRuntimeCallingConvention calling_convention;
const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
__ LoadConst32(calling_convention.GetRegisterAt(0), string_index);
- mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
- instruction_,
- instruction_->GetDexPc(),
- this,
- IsDirectEntrypoint(kQuickResolveString));
+ mips_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
Primitive::Type type = instruction_->GetType();
mips_codegen->MoveLocation(locations->Out(),
@@ -327,11 +313,10 @@
// Live registers will be restored in the catch block if caught.
SaveLiveRegisters(codegen, instruction_->GetLocations());
}
- mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
+ mips_codegen->InvokeRuntime(kQuickThrowNullPointer,
instruction_,
instruction_->GetDexPc(),
- this,
- IsDirectEntrypoint(kQuickThrowNullPointer));
+ this);
CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
}
@@ -351,11 +336,7 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
- mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
- instruction_,
- instruction_->GetDexPc(),
- this,
- IsDirectEntrypoint(kQuickTestSuspend));
+ mips_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
if (successor_ == nullptr) {
__ B(GetReturnLabel());
@@ -407,11 +388,7 @@
Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
- mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
- instruction_,
- dex_pc,
- this,
- IsDirectEntrypoint(kQuickInstanceofNonTrivial));
+ mips_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
CheckEntrypointTypes<
kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>();
Primitive::Type ret_type = instruction_->GetType();
@@ -419,11 +396,7 @@
mips_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
} else {
DCHECK(instruction_->IsCheckCast());
- mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
- instruction_,
- dex_pc,
- this,
- IsDirectEntrypoint(kQuickCheckCast));
+ mips_codegen->InvokeRuntime(kQuickCheckCast, instruction_, dex_pc, this);
CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
}
@@ -445,12 +418,7 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, instruction_->GetLocations());
- mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
- instruction_,
- instruction_->GetDexPc(),
- this,
- IsDirectEntrypoint(kQuickDeoptimize));
+ mips_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickDeoptimize, void, void>();
}
@@ -697,16 +665,17 @@
if ((fpu_spill_mask_ != 0) && (POPCOUNT(core_spill_mask_) % 2 != 0)) {
core_spill_mask_ |= (1 << ZERO);
}
+}
+
+bool CodeGeneratorMIPS::HasAllocatedCalleeSaveRegisters() const {
// If RA is clobbered by PC-relative operations on R2 and it's the only spilled register
- // (this can happen in leaf methods), artificially spill the ZERO register in order to
- // force explicit saving and restoring of RA. RA isn't saved/restored when it's the only
- // spilled register.
+ // (this can happen in leaf methods), force CodeGenerator::InitializeCodeGeneration()
+ // into the path that creates a stack frame so that RA can be explicitly saved and restored.
+ // RA can't otherwise be saved/restored when it's the only spilled register.
// TODO: Can this be improved? It causes creation of a stack frame (while RA might be
// saved in an unused temporary register) and saving of RA and the current method pointer
// in the frame.
- if (clobbered_ra_ && core_spill_mask_ == (1u << RA) && fpu_spill_mask_ == 0) {
- core_spill_mask_ |= (1 << ZERO);
- }
+ return CodeGenerator::HasAllocatedCalleeSaveRegisters() || clobbered_ra_;
}
static dwarf::Reg DWARFReg(Register reg) {
@@ -729,6 +698,9 @@
}
if (HasEmptyFrame()) {
+ CHECK_EQ(fpu_spill_mask_, 0u);
+ CHECK_EQ(core_spill_mask_, 1u << RA);
+ CHECK(!clobbered_ra_);
return;
}
@@ -762,6 +734,7 @@
}
// Store the current method pointer.
+ // TODO: can we not do this if RequiresCurrentMethod() returns false?
__ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
}
@@ -792,12 +765,24 @@
// TODO: __ cfi().Restore(DWARFReg(reg));
}
- __ DecreaseFrameSize(GetFrameSize());
+ size_t frame_size = GetFrameSize();
+ // Adjust the stack pointer in the delay slot if doing so doesn't break CFI.
+ bool exchange = IsInt<16>(static_cast<int32_t>(frame_size));
+ bool reordering = __ SetReorder(false);
+ if (exchange) {
+ __ Jr(RA);
+ __ DecreaseFrameSize(frame_size); // Single instruction in delay slot.
+ } else {
+ __ DecreaseFrameSize(frame_size);
+ __ Jr(RA);
+ __ Nop(); // In delay slot.
+ }
+ __ SetReorder(reordering);
+ } else {
+ __ Jr(RA);
+ __ NopIfNoReordering();
}
- __ Jr(RA);
- __ Nop();
-
__ cfi().RestoreState();
__ cfi().DefCFAOffset(GetFrameSize());
}
@@ -1233,27 +1218,17 @@
stream << FRegister(reg);
}
+constexpr size_t kMipsDirectEntrypointRuntimeOffset = 16;
+
void CodeGeneratorMIPS::InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- InvokeRuntime(GetThreadOffset<kMipsPointerSize>(entrypoint).Int32Value(),
- instruction,
- dex_pc,
- slow_path,
- IsDirectEntrypoint(entrypoint));
-}
-
-constexpr size_t kMipsDirectEntrypointRuntimeOffset = 16;
-
-void CodeGeneratorMIPS::InvokeRuntime(int32_t entry_point_offset,
- HInstruction* instruction,
- uint32_t dex_pc,
- SlowPathCode* slow_path,
- bool is_direct_entrypoint) {
- __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
+ ValidateInvokeRuntime(instruction, slow_path);
+ bool reordering = __ SetReorder(false);
+ __ LoadFromOffset(kLoadWord, T9, TR, GetThreadOffset<kMipsPointerSize>(entrypoint).Int32Value());
__ Jalr(T9);
- if (is_direct_entrypoint) {
+ if (IsDirectEntrypoint(entrypoint)) {
// Reserve argument space on stack (for $a0-$a3) for
// entrypoints that directly reference native implementations.
// Called function may use this space to store $a0-$a3 regs.
@@ -1262,7 +1237,10 @@
} else {
__ Nop(); // In delay slot.
}
- RecordPcInfo(instruction, dex_pc, slow_path);
+ __ SetReorder(reordering);
+ if (EntrypointRequiresStackMap(entrypoint)) {
+ RecordPcInfo(instruction, dex_pc, slow_path);
+ }
}
void InstructionCodeGeneratorMIPS::GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path,
@@ -2066,11 +2044,7 @@
}
} else {
DCHECK_EQ(value_type, Primitive::kPrimNot);
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
- instruction,
- instruction->GetDexPc(),
- nullptr,
- IsDirectEntrypoint(kQuickAputObject));
+ codegen_->InvokeRuntime(kQuickAputObject, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
}
break;
@@ -2636,11 +2610,7 @@
GenerateDivRemIntegral(instruction);
break;
case Primitive::kPrimLong: {
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv),
- instruction,
- instruction->GetDexPc(),
- nullptr,
- IsDirectEntrypoint(kQuickLdiv));
+ codegen_->InvokeRuntime(kQuickLdiv, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickLdiv, int64_t, int64_t, int64_t>();
break;
}
@@ -3499,6 +3469,7 @@
void LocationsBuilderMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -3626,11 +3597,7 @@
// Do implicit Null check
__ Lw(ZERO, locations->GetTemp(0).AsRegister<Register>(), 0);
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pA64Load),
- instruction,
- dex_pc,
- nullptr,
- IsDirectEntrypoint(kQuickA64Load));
+ codegen_->InvokeRuntime(kQuickA64Load, instruction, dex_pc);
CheckEntrypointTypes<kQuickA64Load, int64_t, volatile const int64_t*>();
if (type == Primitive::kPrimDouble) {
// FP results are returned in core registers. Need to move them.
@@ -3778,11 +3745,7 @@
value);
}
}
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pA64Store),
- instruction,
- dex_pc,
- nullptr,
- IsDirectEntrypoint(kQuickA64Store));
+ codegen_->InvokeRuntime(kQuickA64Store, instruction, dex_pc);
CheckEntrypointTypes<kQuickA64Store, void, volatile int64_t *, int64_t>();
} else {
if (!Primitive::IsFloatingPointType(type)) {
@@ -3953,7 +3916,7 @@
__ LoadFromOffset(kLoadWord, T9, temp, entry_point.Int32Value());
// T9();
__ Jalr(T9);
- __ Nop();
+ __ NopIfNoReordering();
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
@@ -4254,7 +4217,7 @@
// T9 prepared above for better instruction scheduling.
// T9()
__ Jalr(T9);
- __ Nop();
+ __ NopIfNoReordering();
break;
case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
// TODO: Implement this type.
@@ -4270,7 +4233,7 @@
kMipsPointerSize).Int32Value());
// T9()
__ Jalr(T9);
- __ Nop();
+ __ NopIfNoReordering();
break;
}
DCHECK(!IsLeafMethod());
@@ -4312,7 +4275,7 @@
__ LoadFromOffset(kLoadWord, T9, temp, entry_point.Int32Value());
// T9();
__ Jalr(T9);
- __ Nop();
+ __ NopIfNoReordering();
}
void InstructionCodeGeneratorMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
@@ -4366,11 +4329,7 @@
LocationSummary* locations = cls->GetLocations();
if (cls->NeedsAccessCheck()) {
codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex());
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
- cls,
- cls->GetDexPc(),
- nullptr,
- IsDirectEntrypoint(kQuickInitializeTypeAndVerifyAccess));
+ codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
return;
}
@@ -4421,6 +4380,7 @@
DCHECK(!kEmitCompilerReadBarrier);
CodeGeneratorMIPS::PcRelativePatchInfo* info =
codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
+ bool reordering = __ SetReorder(false);
if (isR6) {
__ Bind(&info->high_label);
__ Bind(&info->pc_rel_label);
@@ -4436,6 +4396,7 @@
// Add a 32-bit offset to PC.
__ Addu(out, out, base_or_current_method_reg);
}
+ __ SetReorder(reordering);
break;
}
case HLoadClass::LoadKind::kBootImageAddress: {
@@ -4579,6 +4540,7 @@
DCHECK(!kEmitCompilerReadBarrier);
CodeGeneratorMIPS::PcRelativePatchInfo* info =
codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
+ bool reordering = __ SetReorder(false);
if (isR6) {
__ Bind(&info->high_label);
__ Bind(&info->pc_rel_label);
@@ -4594,6 +4556,7 @@
// Add a 32-bit offset to PC.
__ Addu(out, out, base_or_current_method_reg);
}
+ __ SetReorder(reordering);
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
@@ -4634,18 +4597,10 @@
void InstructionCodeGeneratorMIPS::VisitMonitorOperation(HMonitorOperation* instruction) {
if (instruction->IsEnter()) {
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLockObject),
- instruction,
- instruction->GetDexPc(),
- nullptr,
- IsDirectEntrypoint(kQuickLockObject));
+ codegen_->InvokeRuntime(kQuickLockObject, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
} else {
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pUnlockObject),
- instruction,
- instruction->GetDexPc(),
- nullptr,
- IsDirectEntrypoint(kQuickUnlockObject));
+ codegen_->InvokeRuntime(kQuickUnlockObject, instruction, instruction->GetDexPc());
}
CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
}
@@ -4820,12 +4775,7 @@
__ Lw(current_method_register, SP, kCurrentMethodStackOffset);
// Move an uint16_t value to a register.
__ LoadConst32(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
- codegen_->InvokeRuntime(
- GetThreadOffset<kMipsPointerSize>(instruction->GetEntrypoint()).Int32Value(),
- instruction,
- instruction->GetDexPc(),
- nullptr,
- IsDirectEntrypoint(kQuickAllocArrayWithAccessCheck));
+ codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
void*, uint32_t, int32_t, ArtMethod*>();
}
@@ -4851,15 +4801,10 @@
__ LoadFromOffset(kLoadWord, temp, TR, QUICK_ENTRY_POINT(pNewEmptyString));
__ LoadFromOffset(kLoadWord, T9, temp, code_offset.Int32Value());
__ Jalr(T9);
- __ Nop();
+ __ NopIfNoReordering();
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
- codegen_->InvokeRuntime(
- GetThreadOffset<kMipsPointerSize>(instruction->GetEntrypoint()).Int32Value(),
- instruction,
- instruction->GetDexPc(),
- nullptr,
- IsDirectEntrypoint(kQuickAllocObjectWithAccessCheck));
+ codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
}
}
@@ -5044,27 +4989,17 @@
GenerateDivRemIntegral(instruction);
break;
case Primitive::kPrimLong: {
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod),
- instruction,
- instruction->GetDexPc(),
- nullptr,
- IsDirectEntrypoint(kQuickLmod));
+ codegen_->InvokeRuntime(kQuickLmod, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickLmod, int64_t, int64_t, int64_t>();
break;
}
case Primitive::kPrimFloat: {
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmodf),
- instruction, instruction->GetDexPc(),
- nullptr,
- IsDirectEntrypoint(kQuickFmodf));
+ codegen_->InvokeRuntime(kQuickFmodf, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickFmodf, float, float, float>();
break;
}
case Primitive::kPrimDouble: {
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmod),
- instruction, instruction->GetDexPc(),
- nullptr,
- IsDirectEntrypoint(kQuickFmod));
+ codegen_->InvokeRuntime(kQuickFmod, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickFmod, double, double, double>();
break;
}
@@ -5220,7 +5155,9 @@
}
void LocationsBuilderMIPS::VisitSuspendCheck(HSuspendCheck* instruction) {
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
}
void InstructionCodeGeneratorMIPS::VisitSuspendCheck(HSuspendCheck* instruction) {
@@ -5245,11 +5182,7 @@
}
void InstructionCodeGeneratorMIPS::VisitThrow(HThrow* instruction) {
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException),
- instruction,
- instruction->GetDexPc(),
- nullptr,
- IsDirectEntrypoint(kQuickDeliverException));
+ codegen_->InvokeRuntime(kQuickDeliverException, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}
@@ -5370,15 +5303,9 @@
__ Cvtdl(dst, FTMP);
}
} else {
- int32_t entry_offset = (result_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pL2f)
- : QUICK_ENTRY_POINT(pL2d);
- bool direct = (result_type == Primitive::kPrimFloat) ? IsDirectEntrypoint(kQuickL2f)
- : IsDirectEntrypoint(kQuickL2d);
- codegen_->InvokeRuntime(entry_offset,
- conversion,
- conversion->GetDexPc(),
- nullptr,
- direct);
+ QuickEntrypointEnum entrypoint = (result_type == Primitive::kPrimFloat) ? kQuickL2f
+ : kQuickL2d;
+ codegen_->InvokeRuntime(entrypoint, conversion, conversion->GetDexPc());
if (result_type == Primitive::kPrimFloat) {
CheckEntrypointTypes<kQuickL2f, float, int64_t>();
} else {
@@ -5471,11 +5398,9 @@
__ Bind(&done);
} else {
- int32_t entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2l)
- : QUICK_ENTRY_POINT(pD2l);
- bool direct = (result_type == Primitive::kPrimFloat) ? IsDirectEntrypoint(kQuickF2l)
- : IsDirectEntrypoint(kQuickD2l);
- codegen_->InvokeRuntime(entry_offset, conversion, conversion->GetDexPc(), nullptr, direct);
+ QuickEntrypointEnum entrypoint = (input_type == Primitive::kPrimFloat) ? kQuickF2l
+ : kQuickD2l;
+ codegen_->InvokeRuntime(entrypoint, conversion, conversion->GetDexPc());
if (input_type == Primitive::kPrimFloat) {
CheckEntrypointTypes<kQuickF2l, int64_t, float>();
} else {
@@ -5751,7 +5676,7 @@
Register reg = base->GetLocations()->Out().AsRegister<Register>();
CodeGeneratorMIPS::PcRelativePatchInfo* info =
codegen_->NewPcRelativeDexCacheArrayPatch(base->GetDexFile(), base->GetElementOffset());
-
+ bool reordering = __ SetReorder(false);
if (codegen_->GetInstructionSetFeatures().IsR6()) {
__ Bind(&info->high_label);
__ Bind(&info->pc_rel_label);
@@ -5769,6 +5694,7 @@
__ Addu(reg, reg, RA);
// TODO: Can we share this code with that of VisitMipsComputeBaseMethodAddress()?
}
+ __ SetReorder(reordering);
}
void LocationsBuilderMIPS::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
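
A short sketch of the SetReorder bracket introduced around the PC-relative patch sequences above; the assembler calls are the ones already used in this diff, and the instructions inside the bracket are elided:

    // Disable the assembler's automatic branch/delay-slot reordering so the
    // labels bound in this sequence stay attached to the exact instructions
    // the PC-relative patches expect, then restore the caller's mode.
    bool reordering = __ SetReorder(false);  // Returns the previous setting.
    // ... bind info->high_label / info->pc_rel_label and emit the manually
    // scheduled sequence here ...
    __ SetReorder(reordering);

The same bracket guards the new frame-exit sequence, where the frame-size adjustment is deliberately scheduled into the jr's delay slot.
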
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 46810d6..7ba6c0d 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -274,6 +274,7 @@
virtual ~CodeGeneratorMIPS() {}
void ComputeSpillMask() OVERRIDE;
+ bool HasAllocatedCalleeSaveRegisters() const OVERRIDE;
void GenerateFrameEntry() OVERRIDE;
void GenerateFrameExit() OVERRIDE;
@@ -347,13 +348,7 @@
void InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
- SlowPathCode* slow_path) OVERRIDE;
-
- void InvokeRuntime(int32_t offset,
- HInstruction* instruction,
- uint32_t dex_pc,
- SlowPathCode* slow_path,
- bool is_direct_entrypoint);
+ SlowPathCode* slow_path = nullptr) OVERRIDE;
ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
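
One note on kMipsDirectEntrypointRuntimeOffset used by the MIPS InvokeRuntime above: the o32 calling convention requires the caller of a native function to provide a 16-byte argument area (home slots for $a0-$a3) at the bottom of its frame, which is why calls to direct entrypoints get extra stack space. A hedged sketch of that shape, assuming frame helpers symmetrical to the DecreaseFrameSize call seen earlier in this diff, not the exact elided code:

    // Reserve the o32 argument save area for a call that goes straight to a
    // native implementation, then release it once the call returns.
    __ IncreaseFrameSize(kMipsDirectEntrypointRuntimeOffset);  // 16 bytes for $a0-$a3.
    __ Jalr(T9);
    __ Nop();  // Delay slot.
    __ DecreaseFrameSize(kMipsDirectEntrypointRuntimeOffset);
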
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 4a5755c..664d498 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -127,13 +127,10 @@
locations->InAt(1),
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimInt);
- uint32_t entry_point_offset = instruction_->AsBoundsCheck()->IsStringCharAt()
- ? QUICK_ENTRY_POINT(pThrowStringBounds)
- : QUICK_ENTRY_POINT(pThrowArrayBounds);
- mips64_codegen->InvokeRuntime(entry_point_offset,
- instruction_,
- instruction_->GetDexPc(),
- this);
+ QuickEntrypointEnum entrypoint = instruction_->AsBoundsCheck()->IsStringCharAt()
+ ? kQuickThrowStringBounds
+ : kQuickThrowArrayBounds;
+ mips64_codegen->InvokeRuntime(entrypoint, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowStringBounds, void, int32_t, int32_t>();
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
@@ -157,10 +154,7 @@
// Live registers will be restored in the catch block if caught.
SaveLiveRegisters(codegen, instruction_->GetLocations());
}
- mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
- instruction_,
- instruction_->GetDexPc(),
- this);
+ mips64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
@@ -191,9 +185,9 @@
InvokeRuntimeCallingConvention calling_convention;
__ LoadConst32(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
- int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
- : QUICK_ENTRY_POINT(pInitializeType);
- mips64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
+ QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
+ : kQuickInitializeType;
+ mips64_codegen->InvokeRuntime(entrypoint, at_, dex_pc_, this);
if (do_clinit_) {
CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
} else {
@@ -246,7 +240,7 @@
InvokeRuntimeCallingConvention calling_convention;
const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
__ LoadConst32(calling_convention.GetRegisterAt(0), string_index);
- mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
+ mips64_codegen->InvokeRuntime(kQuickResolveString,
instruction_,
instruction_->GetDexPc(),
this);
@@ -277,7 +271,7 @@
// Live registers will be restored in the catch block if caught.
SaveLiveRegisters(codegen, instruction_->GetLocations());
}
- mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
+ mips64_codegen->InvokeRuntime(kQuickThrowNullPointer,
instruction_,
instruction_->GetDexPc(),
this);
@@ -300,10 +294,7 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
- mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
- instruction_,
- instruction_->GetDexPc(),
- this);
+ mips64_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
if (successor_ == nullptr) {
__ Bc(GetReturnLabel());
@@ -355,10 +346,7 @@
Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
- mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
- instruction_,
- dex_pc,
- this);
+ mips64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
CheckEntrypointTypes<
kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>();
Primitive::Type ret_type = instruction_->GetType();
@@ -366,7 +354,7 @@
mips64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
} else {
DCHECK(instruction_->IsCheckCast());
- mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc, this);
+ mips64_codegen->InvokeRuntime(kQuickCheckCast, instruction_, dex_pc, this);
CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
}
@@ -388,11 +376,7 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, instruction_->GetLocations());
- mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
- instruction_,
- instruction_->GetDexPc(),
- this);
+ mips64_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickDeoptimize, void, void>();
}
@@ -959,25 +943,20 @@
}
void CodeGeneratorMIPS64::InvokeRuntime(QuickEntrypointEnum entrypoint,
- HInstruction* instruction,
- uint32_t dex_pc,
- SlowPathCode* slow_path) {
- InvokeRuntime(GetThreadOffset<kMips64PointerSize>(entrypoint).Int32Value(),
- instruction,
- dex_pc,
- slow_path);
-}
-
-void CodeGeneratorMIPS64::InvokeRuntime(int32_t entry_point_offset,
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
ValidateInvokeRuntime(instruction, slow_path);
// TODO: anything related to T9/GP/GOT/PIC/.so's?
- __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
+ __ LoadFromOffset(kLoadDoubleword,
+ T9,
+ TR,
+ GetThreadOffset<kMips64PointerSize>(entrypoint).Int32Value());
__ Jalr(T9);
__ Nop();
- RecordPcInfo(instruction, dex_pc, slow_path);
+ if (EntrypointRequiresStackMap(entrypoint)) {
+ RecordPcInfo(instruction, dex_pc, slow_path);
+ }
}
void InstructionCodeGeneratorMIPS64::GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path,
@@ -1514,10 +1493,7 @@
}
} else {
DCHECK_EQ(value_type, Primitive::kPrimNot);
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
- instruction,
- instruction->GetDexPc(),
- nullptr);
+ codegen_->InvokeRuntime(kQuickAputObject, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
}
break;
@@ -2654,6 +2630,7 @@
void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -3187,10 +3164,7 @@
LocationSummary* locations = cls->GetLocations();
if (cls->NeedsAccessCheck()) {
codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex());
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
- cls,
- cls->GetDexPc(),
- nullptr);
+ codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
return;
}
@@ -3285,12 +3259,9 @@
}
void InstructionCodeGeneratorMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
- codegen_->InvokeRuntime(instruction->IsEnter()
- ? QUICK_ENTRY_POINT(pLockObject)
- : QUICK_ENTRY_POINT(pUnlockObject),
+ codegen_->InvokeRuntime(instruction->IsEnter() ? kQuickLockObject : kQuickUnlockObject,
instruction,
- instruction->GetDexPc(),
- nullptr);
+ instruction->GetDexPc());
if (instruction->IsEnter()) {
CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
} else {
@@ -3418,10 +3389,7 @@
LocationSummary* locations = instruction->GetLocations();
// Move an uint16_t value to a register.
__ LoadConst32(locations->GetTemp(0).AsRegister<GpuRegister>(), instruction->GetTypeIndex());
- codegen_->InvokeRuntime(instruction->GetEntrypoint(),
- instruction,
- instruction->GetDexPc(),
- nullptr);
+ codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
}
@@ -3450,10 +3418,7 @@
__ Nop();
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
- codegen_->InvokeRuntime(instruction->GetEntrypoint(),
- instruction,
- instruction->GetDexPc(),
- nullptr);
+ codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
}
}
@@ -3624,9 +3589,8 @@
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
- int32_t entry_offset = (type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pFmodf)
- : QUICK_ENTRY_POINT(pFmod);
- codegen_->InvokeRuntime(entry_offset, instruction, instruction->GetDexPc(), nullptr);
+ QuickEntrypointEnum entrypoint = (type == Primitive::kPrimFloat) ? kQuickFmodf : kQuickFmod;
+ codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
if (type == Primitive::kPrimFloat) {
CheckEntrypointTypes<kQuickFmodf, float, float, float>();
} else {
@@ -3782,7 +3746,9 @@
}
void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
}
void InstructionCodeGeneratorMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
@@ -3807,10 +3773,7 @@
}
void InstructionCodeGeneratorMIPS64::VisitThrow(HThrow* instruction) {
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException),
- instruction,
- instruction->GetDexPc(),
- nullptr);
+ codegen_->InvokeRuntime(kQuickDeliverException, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}
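
The recurring locations->SetCustomSlowPathCallerSaves(RegisterSet()) calls in this change all express the same thing: the slow path of the instruction (suspend check, deoptimization, Baker read barrier marking) reaches a runtime entry point that saves and restores every register itself, so the register allocator does not have to treat any caller-save register as clobbered at that safepoint. The pattern, as used at the call sites above:

    LocationSummary* locations =
        new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
    locations->SetCustomSlowPathCallerSaves(RegisterSet());  // Empty set: no caller-save registers.
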
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 197f86b..3910530 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -323,12 +323,7 @@
void InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
- SlowPathCode* slow_path) OVERRIDE;
-
- void InvokeRuntime(int32_t offset,
- HInstruction* instruction,
- uint32_t dex_pc,
- SlowPathCode* slow_path);
+ SlowPathCode* slow_path = nullptr) OVERRIDE;
ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
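
The other pattern shared by the arm64, mips, mips64 and x86 hunks is the EntrypointRequiresStackMap guard around RecordPcInfo. The rationale, sketched with the exact condition used in this change (the comment text is explanatory and not taken from the sources):

    // A stack map at the return PC is only needed when the callee may inspect
    // or unwind the compiled frame: entrypoints that can allocate, suspend,
    // throw or deoptimize. Entrypoints that never look at the caller's frame
    // skip the map, shrinking the stack map data emitted for the method.
    if (EntrypointRequiresStackMap(entrypoint)) {
      RecordPcInfo(instruction, dex_pc, slow_path);
    }
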
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 675c5e0..b3b648f 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -62,7 +62,7 @@
// Live registers will be restored in the catch block if caught.
SaveLiveRegisters(codegen, instruction_->GetLocations());
}
- x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
+ x86_codegen->InvokeRuntime(kQuickThrowNullPointer,
instruction_,
instruction_->GetDexPc(),
this);
@@ -88,10 +88,7 @@
// Live registers will be restored in the catch block if caught.
SaveLiveRegisters(codegen, instruction_->GetLocations());
}
- x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
- instruction_,
- instruction_->GetDexPc(),
- this);
+ x86_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
@@ -165,13 +162,10 @@
length_loc,
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimInt);
- uint32_t entry_point_offset = instruction_->AsBoundsCheck()->IsStringCharAt()
- ? QUICK_ENTRY_POINT(pThrowStringBounds)
- : QUICK_ENTRY_POINT(pThrowArrayBounds);
- x86_codegen->InvokeRuntime(entry_point_offset,
- instruction_,
- instruction_->GetDexPc(),
- this);
+ QuickEntrypointEnum entrypoint = instruction_->AsBoundsCheck()->IsStringCharAt()
+ ? kQuickThrowStringBounds
+ : kQuickThrowArrayBounds;
+ x86_codegen->InvokeRuntime(entrypoint, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowStringBounds, void, int32_t, int32_t>();
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
@@ -192,10 +186,7 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
- x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
- instruction_,
- instruction_->GetDexPc(),
- this);
+ x86_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
if (successor_ == nullptr) {
__ jmp(GetReturnLabel());
@@ -237,10 +228,7 @@
InvokeRuntimeCallingConvention calling_convention;
const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
__ movl(calling_convention.GetRegisterAt(0), Immediate(string_index));
- x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
- instruction_,
- instruction_->GetDexPc(),
- this);
+ x86_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
RestoreLiveRegisters(codegen, locations);
@@ -272,8 +260,8 @@
InvokeRuntimeCallingConvention calling_convention;
__ movl(calling_convention.GetRegisterAt(0), Immediate(cls_->GetTypeIndex()));
- x86_codegen->InvokeRuntime(do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
- : QUICK_ENTRY_POINT(pInitializeType),
+ x86_codegen->InvokeRuntime(do_clinit_ ? kQuickInitializeStaticStorage
+ : kQuickInitializeType,
at_, dex_pc_, this);
if (do_clinit_) {
CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
@@ -342,7 +330,7 @@
Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
- x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
+ x86_codegen->InvokeRuntime(kQuickInstanceofNonTrivial,
instruction_,
instruction_->GetDexPc(),
this);
@@ -350,10 +338,7 @@
kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>();
} else {
DCHECK(instruction_->IsCheckCast());
- x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
- instruction_,
- instruction_->GetDexPc(),
- this);
+ x86_codegen->InvokeRuntime(kQuickCheckCast, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
}
@@ -384,11 +369,7 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, instruction_->GetLocations());
- x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
- instruction_,
- instruction_->GetDexPc(),
- this);
+ x86_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickDeoptimize, void, void>();
}
@@ -427,10 +408,7 @@
codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
- x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
- instruction_,
- instruction_->GetDexPc(),
- this);
+ x86_codegen->InvokeRuntime(kQuickAputObject, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
RestoreLiveRegisters(codegen, locations);
__ jmp(GetExitLabel());
@@ -648,10 +626,7 @@
codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
__ movl(calling_convention.GetRegisterAt(2), Immediate(offset_));
}
- x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pReadBarrierSlow),
- instruction_,
- instruction_->GetDexPc(),
- this);
+ x86_codegen->InvokeRuntime(kQuickReadBarrierSlow, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<
kQuickReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t>();
x86_codegen->Move32(out_, Location::RegisterLocation(EAX));
@@ -715,7 +690,7 @@
InvokeRuntimeCallingConvention calling_convention;
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
x86_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), root_);
- x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pReadBarrierForRootSlow),
+ x86_codegen->InvokeRuntime(kQuickReadBarrierForRootSlow,
instruction_,
instruction_->GetDexPc(),
this);
@@ -808,25 +783,21 @@
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- InvokeRuntime(GetThreadOffset<kX86PointerSize>(entrypoint).Int32Value(),
- instruction,
- dex_pc,
- slow_path);
-}
-
-void CodeGeneratorX86::InvokeRuntime(int32_t entry_point_offset,
- HInstruction* instruction,
- uint32_t dex_pc,
- SlowPathCode* slow_path) {
ValidateInvokeRuntime(instruction, slow_path);
- __ fs()->call(Address::Absolute(entry_point_offset));
- RecordPcInfo(instruction, dex_pc, slow_path);
+ GenerateInvokeRuntime(GetThreadOffset<kX86PointerSize>(entrypoint).Int32Value());
+ if (EntrypointRequiresStackMap(entrypoint)) {
+ RecordPcInfo(instruction, dex_pc, slow_path);
+ }
}
void CodeGeneratorX86::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
HInstruction* instruction,
SlowPathCode* slow_path) {
ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction, slow_path);
+ GenerateInvokeRuntime(entry_point_offset);
+}
+
+void CodeGeneratorX86::GenerateInvokeRuntime(int32_t entry_point_offset) {
__ fs()->call(Address::Absolute(entry_point_offset));
}
@@ -1527,6 +1498,7 @@
void LocationsBuilderX86::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::Any());
}
@@ -2597,19 +2569,13 @@
case Primitive::kPrimFloat:
// Processing a Dex `float-to-long' instruction.
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pF2l),
- conversion,
- conversion->GetDexPc(),
- nullptr);
+ codegen_->InvokeRuntime(kQuickF2l, conversion, conversion->GetDexPc());
CheckEntrypointTypes<kQuickF2l, int64_t, float>();
break;
case Primitive::kPrimDouble:
// Processing a Dex `double-to-long' instruction.
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pD2l),
- conversion,
- conversion->GetDexPc(),
- nullptr);
+ codegen_->InvokeRuntime(kQuickD2l, conversion, conversion->GetDexPc());
CheckEntrypointTypes<kQuickD2l, int64_t, double>();
break;
@@ -3455,16 +3421,10 @@
DCHECK_EQ(EDX, out.AsRegisterPairHigh<Register>());
if (is_div) {
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv),
- instruction,
- instruction->GetDexPc(),
- nullptr);
+ codegen_->InvokeRuntime(kQuickLdiv, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickLdiv, int64_t, int64_t, int64_t>();
} else {
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod),
- instruction,
- instruction->GetDexPc(),
- nullptr);
+ codegen_->InvokeRuntime(kQuickLmod, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickLmod, int64_t, int64_t, int64_t>();
}
break;
@@ -4044,10 +4004,7 @@
__ call(Address(temp, code_offset.Int32Value()));
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
- codegen_->InvokeRuntime(instruction->GetEntrypoint(),
- instruction,
- instruction->GetDexPc(),
- nullptr);
+ codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
DCHECK(!codegen_->IsLeafMethod());
}
@@ -4068,10 +4025,7 @@
__ movl(calling_convention.GetRegisterAt(0), Immediate(instruction->GetTypeIndex()));
    // Note: if heap poisoning is enabled, the entry point takes care
// of poisoning the reference.
- codegen_->InvokeRuntime(instruction->GetEntrypoint(),
- instruction,
- instruction->GetDexPc(),
- nullptr);
+ codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
DCHECK(!codegen_->IsLeafMethod());
}
@@ -4614,6 +4568,9 @@
kEmitCompilerReadBarrier ?
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
+ if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
locations->SetInAt(0, Location::RequiresRegister());
if (Primitive::IsFloatingPointType(instruction->GetType())) {
@@ -5078,6 +5035,9 @@
object_array_get_with_read_barrier ?
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
+ if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
if (Primitive::IsFloatingPointType(instruction->GetType())) {
@@ -5278,7 +5238,6 @@
}
if (needs_write_barrier) {
// Temporary registers for the write barrier.
- // These registers may be used for Baker read barriers too.
locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too.
// Ensure the card is in a byte register.
locations->AddTemp(Location::RegisterLocation(ECX));
@@ -5368,105 +5327,40 @@
    __ Bind(&not_null);
}
- if (kEmitCompilerReadBarrier) {
- if (!kUseBakerReadBarrier) {
- // When (non-Baker) read barriers are enabled, the type
- // checking instrumentation requires two read barriers
- // generated by CodeGeneratorX86::GenerateReadBarrierSlow:
- //
- // __ movl(temp2, temp);
- // // /* HeapReference<Class> */ temp = temp->component_type_
- // __ movl(temp, Address(temp, component_offset));
- // codegen_->GenerateReadBarrierSlow(
- // instruction, temp_loc, temp_loc, temp2_loc, component_offset);
- //
- // // /* HeapReference<Class> */ temp2 = register_value->klass_
- // __ movl(temp2, Address(register_value, class_offset));
- // codegen_->GenerateReadBarrierSlow(
- // instruction, temp2_loc, temp2_loc, value, class_offset, temp_loc);
- //
- // __ cmpl(temp, temp2);
- //
- // However, the second read barrier may trash `temp`, as it
- // is a temporary register, and as such would not be saved
- // along with live registers before calling the runtime (nor
- // restored afterwards). So in this case, we bail out and
- // delegate the work to the array set slow path.
- //
- // TODO: Extend the register allocator to support a new
- // "(locally) live temp" location so as to avoid always
- // going into the slow path when read barriers are enabled?
- //
- // There is no such problem with Baker read barriers (see below).
- __ jmp(slow_path->GetEntryLabel());
- } else {
- Location temp2_loc = locations->GetTemp(1);
- Register temp2 = temp2_loc.AsRegister<Register>();
- // /* HeapReference<Class> */ temp = array->klass_
- codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, temp_loc, array, class_offset, /* needs_null_check */ true);
+ // Note that when Baker read barriers are enabled, the type
+ // checks are performed without read barriers. This is fine,
+ // even in the case where a class object is in the from-space
+ // after the flip, as a comparison involving such a type would
+ // not produce a false positive; it may of course produce a
+ // false negative, in which case we would take the ArraySet
+ // slow path.
- // /* HeapReference<Class> */ temp = temp->component_type_
- codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, temp_loc, temp, component_offset, /* needs_null_check */ false);
- // Register `temp` is not trashed by the read barrier
- // emitted by GenerateFieldLoadWithBakerReadBarrier below,
- // as that method produces a call to a ReadBarrierMarkRegX
- // entry point, which saves all potentially live registers,
- // including temporaries such a `temp`.
- // /* HeapReference<Class> */ temp2 = register_value->klass_
- codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, temp2_loc, register_value, class_offset, /* needs_null_check */ false);
- // If heap poisoning is enabled, `temp` and `temp2` have
- // been unpoisoned by the the previous calls to
- // CodeGeneratorX86::GenerateFieldLoadWithBakerReadBarrier.
- __ cmpl(temp, temp2);
+ // /* HeapReference<Class> */ temp = array->klass_
+ __ movl(temp, Address(array, class_offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ MaybeUnpoisonHeapReference(temp);
- if (instruction->StaticTypeOfArrayIsObjectArray()) {
- __ j(kEqual, &do_put);
- // We do not need to emit a read barrier for the
- // following heap reference load, as `temp` is only used
- // in a comparison with null below, and this reference
- // is not kept afterwards. Also, if heap poisoning is
- // enabled, there is no need to unpoison that heap
- // reference for the same reason (comparison with null).
- __ cmpl(Address(temp, super_offset), Immediate(0));
- __ j(kNotEqual, slow_path->GetEntryLabel());
- __ Bind(&do_put);
- } else {
- __ j(kNotEqual, slow_path->GetEntryLabel());
- }
- }
- } else {
- // Non read barrier code.
+ // /* HeapReference<Class> */ temp = temp->component_type_
+ __ movl(temp, Address(temp, component_offset));
+ // If heap poisoning is enabled, no need to unpoison `temp`
+ // nor the object reference in `register_value->klass`, as
+ // we are comparing two poisoned references.
+ __ cmpl(temp, Address(register_value, class_offset));
- // /* HeapReference<Class> */ temp = array->klass_
- __ movl(temp, Address(array, class_offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ if (instruction->StaticTypeOfArrayIsObjectArray()) {
+ __ j(kEqual, &do_put);
+ // If heap poisoning is enabled, the `temp` reference has
+ // not been unpoisoned yet; unpoison it now.
__ MaybeUnpoisonHeapReference(temp);
- // /* HeapReference<Class> */ temp = temp->component_type_
- __ movl(temp, Address(temp, component_offset));
- // If heap poisoning is enabled, no need to unpoison `temp`
- // nor the object reference in `register_value->klass`, as
- // we are comparing two poisoned references.
- __ cmpl(temp, Address(register_value, class_offset));
-
- if (instruction->StaticTypeOfArrayIsObjectArray()) {
- __ j(kEqual, &do_put);
- // If heap poisoning is enabled, the `temp` reference has
- // not been unpoisoned yet; unpoison it now.
- __ MaybeUnpoisonHeapReference(temp);
-
- // If heap poisoning is enabled, no need to unpoison the
- // heap reference loaded below, as it is only used for a
- // comparison with null.
- __ cmpl(Address(temp, super_offset), Immediate(0));
- __ j(kNotEqual, slow_path->GetEntryLabel());
- __ Bind(&do_put);
- } else {
- __ j(kNotEqual, slow_path->GetEntryLabel());
- }
+ // If heap poisoning is enabled, no need to unpoison the
+ // heap reference loaded below, as it is only used for a
+ // comparison with null.
+ __ cmpl(Address(temp, super_offset), Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(&do_put);
+ } else {
+ __ j(kNotEqual, slow_path->GetEntryLabel());
}
}
@@ -5688,7 +5582,9 @@
}
void LocationsBuilderX86::VisitSuspendCheck(HSuspendCheck* instruction) {
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
}
void InstructionCodeGeneratorX86::VisitSuspendCheck(HSuspendCheck* instruction) {
@@ -5999,17 +5895,6 @@
HLoadClass::LoadKind CodeGeneratorX86::GetSupportedLoadClassKind(
HLoadClass::LoadKind desired_class_load_kind) {
- if (kEmitCompilerReadBarrier) {
- switch (desired_class_load_kind) {
- case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
- case HLoadClass::LoadKind::kBootImageAddress:
- // TODO: Implement for read barrier.
- return HLoadClass::LoadKind::kDexCacheViaMethod;
- default:
- break;
- }
- }
switch (desired_class_load_kind) {
case HLoadClass::LoadKind::kReferrersClass:
break;
@@ -6051,10 +5936,15 @@
return;
}
- LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || kEmitCompilerReadBarrier)
+ const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
+ LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
+
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
load_kind == HLoadClass::LoadKind::kDexCacheViaMethod ||
@@ -6069,10 +5959,7 @@
LocationSummary* locations = cls->GetLocations();
if (cls->NeedsAccessCheck()) {
codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex());
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
- cls,
- cls->GetDexPc(),
- nullptr);
+ codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
return;
}
@@ -6081,6 +5968,7 @@
Register out = out_loc.AsRegister<Register>();
bool generate_null_check = false;
+ const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
switch (cls->GetLoadKind()) {
case HLoadClass::LoadKind::kReferrersClass: {
DCHECK(!cls->CanCallRuntime());
@@ -6088,24 +5976,28 @@
// /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
Register current_method = locations->InAt(0).AsRegister<Register>();
GenerateGcRootFieldLoad(
- cls, out_loc, Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
+ cls,
+ out_loc,
+ Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()),
+ /*fixup_label*/ nullptr,
+ requires_read_barrier);
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimeAddress: {
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(!requires_read_barrier);
__ movl(out, Immediate(/* placeholder */ 0));
codegen_->RecordTypePatch(cls);
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(!requires_read_barrier);
Register method_address = locations->InAt(0).AsRegister<Register>();
__ leal(out, Address(method_address, CodeGeneratorX86::kDummy32BitOffset));
codegen_->RecordTypePatch(cls);
break;
}
case HLoadClass::LoadKind::kBootImageAddress: {
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(!requires_read_barrier);
DCHECK_NE(cls->GetAddress(), 0u);
uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
__ movl(out, Immediate(address));
@@ -6116,7 +6008,11 @@
DCHECK_NE(cls->GetAddress(), 0u);
uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
// /* GcRoot<mirror::Class> */ out = *address
- GenerateGcRootFieldLoad(cls, out_loc, Address::Absolute(address));
+ GenerateGcRootFieldLoad(cls,
+ out_loc,
+ Address::Absolute(address),
+ /*fixup_label*/ nullptr,
+ requires_read_barrier);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -6125,8 +6021,11 @@
uint32_t offset = cls->GetDexCacheElementOffset();
Label* fixup_label = codegen_->NewPcRelativeDexCacheArrayPatch(cls->GetDexFile(), offset);
// /* GcRoot<mirror::Class> */ out = *(base + offset) /* PC-relative */
- GenerateGcRootFieldLoad(
- cls, out_loc, Address(base_reg, CodeGeneratorX86::kDummy32BitOffset), fixup_label);
+ GenerateGcRootFieldLoad(cls,
+ out_loc,
+ Address(base_reg, CodeGeneratorX86::kDummy32BitOffset),
+ fixup_label,
+ requires_read_barrier);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -6137,8 +6036,11 @@
__ movl(out, Address(current_method,
ArtMethod::DexCacheResolvedTypesOffset(kX86PointerSize).Int32Value()));
// /* GcRoot<mirror::Class> */ out = out[type_index]
- GenerateGcRootFieldLoad(
- cls, out_loc, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
+ GenerateGcRootFieldLoad(cls,
+ out_loc,
+ Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())),
+ /*fixup_label*/ nullptr,
+ requires_read_barrier);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -6236,6 +6138,10 @@
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
+ if (kUseBakerReadBarrier && !load->NeedsEnvironment()) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
+
HLoadString::LoadKind load_kind = load->GetLoadKind();
if (load_kind == HLoadString::LoadKind::kDexCacheViaMethod ||
load_kind == HLoadString::LoadKind::kBootImageLinkTimePcRelative ||
@@ -6313,10 +6219,7 @@
}
void InstructionCodeGeneratorX86::VisitThrow(HThrow* instruction) {
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException),
- instruction,
- instruction->GetDexPc(),
- nullptr);
+ codegen_->InvokeRuntime(kQuickDeliverException, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}
@@ -6331,6 +6234,7 @@
void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
+ bool baker_read_barrier_slow_path = false;
switch (type_check_kind) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
@@ -6338,6 +6242,7 @@
case TypeCheckKind::kArrayObjectCheck:
call_kind =
kEmitCompilerReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
+ baker_read_barrier_slow_path = kUseBakerReadBarrier;
break;
case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
@@ -6347,6 +6252,9 @@
}
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ if (baker_read_barrier_slow_path) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::Any());
// Note that TypeCheckSlowPathX86 uses this "out" register too.
@@ -6544,6 +6452,7 @@
LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
+ bool baker_read_barrier_slow_path = false;
switch (type_check_kind) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
@@ -6552,6 +6461,7 @@
call_kind = (throws_into_catch || kEmitCompilerReadBarrier) ?
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
+ baker_read_barrier_slow_path = kUseBakerReadBarrier && !throws_into_catch;
break;
case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
@@ -6560,6 +6470,9 @@
break;
}
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ if (baker_read_barrier_slow_path) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::Any());
// Note that TypeCheckSlowPathX86 uses this "temp" register too.
@@ -6761,11 +6674,10 @@
}
void InstructionCodeGeneratorX86::VisitMonitorOperation(HMonitorOperation* instruction) {
- codegen_->InvokeRuntime(instruction->IsEnter() ? QUICK_ENTRY_POINT(pLockObject)
- : QUICK_ENTRY_POINT(pUnlockObject),
+ codegen_->InvokeRuntime(instruction->IsEnter() ? kQuickLockObject
+ : kQuickUnlockObject,
instruction,
- instruction->GetDexPc(),
- nullptr);
+ instruction->GetDexPc());
if (instruction->IsEnter()) {
CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
} else {
@@ -6965,9 +6877,11 @@
void InstructionCodeGeneratorX86::GenerateGcRootFieldLoad(HInstruction* instruction,
Location root,
const Address& address,
- Label* fixup_label) {
+ Label* fixup_label,
+ bool requires_read_barrier) {
Register root_reg = root.AsRegister<Register>();
- if (kEmitCompilerReadBarrier) {
+ if (requires_read_barrier) {
+ DCHECK(kEmitCompilerReadBarrier);
if (kUseBakerReadBarrier) {
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
      // Baker's read barriers are used:
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index c644e40..e225098 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -263,7 +263,8 @@
void GenerateGcRootFieldLoad(HInstruction* instruction,
Location root,
const Address& address,
- Label* fixup_label = nullptr);
+ Label* fixup_label = nullptr,
+ bool requires_read_barrier = kEmitCompilerReadBarrier);
// Push value to FPU stack. `is_fp` specifies whether the value is floating point or not.
// `is_wide` specifies whether it is long/double or not.
@@ -327,12 +328,7 @@
void InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
- SlowPathCode* slow_path) OVERRIDE;
-
- void InvokeRuntime(int32_t entry_point_offset,
- HInstruction* instruction,
- uint32_t dex_pc,
- SlowPathCode* slow_path);
+ SlowPathCode* slow_path = nullptr) OVERRIDE;
// Generate code to invoke a runtime entry point, but do not record
// PC-related information in a stack map.
@@ -340,6 +336,8 @@
HInstruction* instruction,
SlowPathCode* slow_path);
+ void GenerateInvokeRuntime(int32_t entry_point_offset);
+
size_t GetWordSize() const OVERRIDE {
return kX86WordSize;
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 87b6de3..b3228f8 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -66,7 +66,7 @@
// Live registers will be restored in the catch block if caught.
SaveLiveRegisters(codegen, instruction_->GetLocations());
}
- x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
+ x86_64_codegen->InvokeRuntime(kQuickThrowNullPointer,
instruction_,
instruction_->GetDexPc(),
this);
@@ -92,10 +92,7 @@
// Live registers will be restored in the catch block if caught.
SaveLiveRegisters(codegen, instruction_->GetLocations());
}
- x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
- instruction_,
- instruction_->GetDexPc(),
- this);
+ x86_64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
@@ -149,10 +146,7 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
- x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
- instruction_,
- instruction_->GetDexPc(),
- this);
+ x86_64_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
if (successor_ == nullptr) {
__ jmp(GetReturnLabel());
@@ -219,13 +213,10 @@
length_loc,
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimInt);
- uint32_t entry_point_offset = instruction_->AsBoundsCheck()->IsStringCharAt()
- ? QUICK_ENTRY_POINT(pThrowStringBounds)
- : QUICK_ENTRY_POINT(pThrowArrayBounds);
- x86_64_codegen->InvokeRuntime(entry_point_offset,
- instruction_,
- instruction_->GetDexPc(),
- this);
+ QuickEntrypointEnum entrypoint = instruction_->AsBoundsCheck()->IsStringCharAt()
+ ? kQuickThrowStringBounds
+ : kQuickThrowArrayBounds;
+ x86_64_codegen->InvokeRuntime(entrypoint, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowStringBounds, void, int32_t, int32_t>();
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
@@ -257,9 +248,7 @@
InvokeRuntimeCallingConvention calling_convention;
__ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(cls_->GetTypeIndex()));
- x86_64_codegen->InvokeRuntime(do_clinit_ ?
- QUICK_ENTRY_POINT(pInitializeStaticStorage) :
- QUICK_ENTRY_POINT(pInitializeType),
+ x86_64_codegen->InvokeRuntime(do_clinit_ ? kQuickInitializeStaticStorage : kQuickInitializeType,
at_,
dex_pc_,
this);
@@ -314,7 +303,7 @@
InvokeRuntimeCallingConvention calling_convention;
const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
__ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(string_index));
- x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
+ x86_64_codegen->InvokeRuntime(kQuickResolveString,
instruction_,
instruction_->GetDexPc(),
this);
@@ -362,18 +351,12 @@
Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
- x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
- instruction_,
- dex_pc,
- this);
+ x86_64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
CheckEntrypointTypes<
kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>();
} else {
DCHECK(instruction_->IsCheckCast());
- x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
- instruction_,
- dex_pc,
- this);
+ x86_64_codegen->InvokeRuntime(kQuickCheckCast, instruction_, dex_pc, this);
CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
}
@@ -405,11 +388,7 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, instruction_->GetLocations());
- x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
- instruction_,
- instruction_->GetDexPc(),
- this);
+ x86_64_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickDeoptimize, void, void>();
}
@@ -448,10 +427,7 @@
    codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
- x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
- instruction_,
- instruction_->GetDexPc(),
- this);
+ x86_64_codegen->InvokeRuntime(kQuickAputObject, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
RestoreLiveRegisters(codegen, locations);
__ jmp(GetExitLabel());
@@ -666,7 +642,7 @@
      codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
__ movl(CpuRegister(calling_convention.GetRegisterAt(2)), Immediate(offset_));
}
- x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pReadBarrierSlow),
+ x86_64_codegen->InvokeRuntime(kQuickReadBarrierSlow,
instruction_,
instruction_->GetDexPc(),
this);
@@ -734,7 +710,7 @@
InvokeRuntimeCallingConvention calling_convention;
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
x86_64_codegen->Move(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), root_);
- x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pReadBarrierForRootSlow),
+ x86_64_codegen->InvokeRuntime(kQuickReadBarrierForRootSlow,
instruction_,
instruction_->GetDexPc(),
this);
@@ -1036,25 +1012,21 @@
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- InvokeRuntime(GetThreadOffset<kX86_64PointerSize>(entrypoint).Int32Value(),
- instruction,
- dex_pc,
- slow_path);
-}
-
-void CodeGeneratorX86_64::InvokeRuntime(int32_t entry_point_offset,
- HInstruction* instruction,
- uint32_t dex_pc,
- SlowPathCode* slow_path) {
ValidateInvokeRuntime(instruction, slow_path);
- __ gs()->call(Address::Absolute(entry_point_offset, /* no_rip */ true));
- RecordPcInfo(instruction, dex_pc, slow_path);
+ GenerateInvokeRuntime(GetThreadOffset<kX86_64PointerSize>(entrypoint).Int32Value());
+ if (EntrypointRequiresStackMap(entrypoint)) {
+ RecordPcInfo(instruction, dex_pc, slow_path);
+ }
}
void CodeGeneratorX86_64::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
HInstruction* instruction,
SlowPathCode* slow_path) {
ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction, slow_path);
+ GenerateInvokeRuntime(entry_point_offset);
+}
+
+void CodeGeneratorX86_64::GenerateInvokeRuntime(int32_t entry_point_offset) {
__ gs()->call(Address::Absolute(entry_point_offset, /* no_rip */ true));
}
@@ -1590,6 +1562,7 @@
void LocationsBuilderX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::Any());
}
@@ -3972,10 +3945,7 @@
__ call(Address(temp, code_offset.SizeValue()));
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
- codegen_->InvokeRuntime(instruction->GetEntrypoint(),
- instruction,
- instruction->GetDexPc(),
- nullptr);
+ codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
DCHECK(!codegen_->IsLeafMethod());
}
@@ -3997,10 +3967,7 @@
instruction->GetTypeIndex());
    // Note: if heap poisoning is enabled, the entry point takes care
// of poisoning the reference.
- codegen_->InvokeRuntime(instruction->GetEntrypoint(),
- instruction,
- instruction->GetDexPc(),
- nullptr);
+ codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
DCHECK(!codegen_->IsLeafMethod());
@@ -4147,6 +4114,9 @@
object_field_get_with_read_barrier ?
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
+ if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
locations->SetInAt(0, Location::RequiresRegister());
if (Primitive::IsFloatingPointType(instruction->GetType())) {
locations->SetOut(Location::RequiresFpuRegister());
@@ -4576,6 +4546,9 @@
object_array_get_with_read_barrier ?
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
+ if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
if (Primitive::IsFloatingPointType(instruction->GetType())) {
@@ -4759,7 +4732,6 @@
if (needs_write_barrier) {
// Temporary registers for the write barrier.
- // These registers may be used for Baker read barriers too.
locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too.
locations->AddTemp(Location::RequiresRegister());
}
@@ -4849,105 +4821,40 @@
    __ Bind(&not_null);
}
- if (kEmitCompilerReadBarrier) {
- if (!kUseBakerReadBarrier) {
- // When (non-Baker) read barriers are enabled, the type
- // checking instrumentation requires two read barriers
- // generated by CodeGeneratorX86_64::GenerateReadBarrierSlow:
- //
- // __ movl(temp2, temp);
- // // /* HeapReference<Class> */ temp = temp->component_type_
- // __ movl(temp, Address(temp, component_offset));
- // codegen_->GenerateReadBarrierSlow(
- // instruction, temp_loc, temp_loc, temp2_loc, component_offset);
- //
- // // /* HeapReference<Class> */ temp2 = register_value->klass_
- // __ movl(temp2, Address(register_value, class_offset));
- // codegen_->GenerateReadBarrierSlow(
- // instruction, temp2_loc, temp2_loc, value, class_offset, temp_loc);
- //
- // __ cmpl(temp, temp2);
- //
- // However, the second read barrier may trash `temp`, as it
- // is a temporary register, and as such would not be saved
- // along with live registers before calling the runtime (nor
- // restored afterwards). So in this case, we bail out and
- // delegate the work to the array set slow path.
- //
- // TODO: Extend the register allocator to support a new
- // "(locally) live temp" location so as to avoid always
- // going into the slow path when read barriers are enabled?
- //
- // There is no such problem with Baker read barriers (see below).
- __ jmp(slow_path->GetEntryLabel());
- } else {
- Location temp2_loc = locations->GetTemp(1);
- CpuRegister temp2 = temp2_loc.AsRegister<CpuRegister>();
- // /* HeapReference<Class> */ temp = array->klass_
- codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, temp_loc, array, class_offset, /* needs_null_check */ true);
+ // Note that when Baker read barriers are enabled, the type
+ // checks are performed without read barriers. This is fine,
+ // even in the case where a class object is in the from-space
+ // after the flip, as a comparison involving such a type would
+ // not produce a false positive; it may of course produce a
+ // false negative, in which case we would take the ArraySet
+ // slow path.
- // /* HeapReference<Class> */ temp = temp->component_type_
- codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, temp_loc, temp, component_offset, /* needs_null_check */ false);
- // Register `temp` is not trashed by the read barrier
- // emitted by GenerateFieldLoadWithBakerReadBarrier below,
- // as that method produces a call to a ReadBarrierMarkRegX
- // entry point, which saves all potentially live registers,
- // including temporaries such a `temp`.
- // /* HeapReference<Class> */ temp2 = register_value->klass_
- codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, temp2_loc, register_value, class_offset, /* needs_null_check */ false);
- // If heap poisoning is enabled, `temp` and `temp2` have
- // been unpoisoned by the the previous calls to
- // CodeGeneratorX86_64::GenerateFieldLoadWithBakerReadBarrier.
- __ cmpl(temp, temp2);
+ // /* HeapReference<Class> */ temp = array->klass_
+ __ movl(temp, Address(array, class_offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ MaybeUnpoisonHeapReference(temp);
- if (instruction->StaticTypeOfArrayIsObjectArray()) {
- __ j(kEqual, &do_put);
- // We do not need to emit a read barrier for the
- // following heap reference load, as `temp` is only used
- // in a comparison with null below, and this reference
- // is not kept afterwards. Also, if heap poisoning is
- // enabled, there is no need to unpoison that heap
- // reference for the same reason (comparison with null).
- __ cmpl(Address(temp, super_offset), Immediate(0));
- __ j(kNotEqual, slow_path->GetEntryLabel());
- __ Bind(&do_put);
- } else {
- __ j(kNotEqual, slow_path->GetEntryLabel());
- }
- }
- } else {
- // Non read barrier code.
+ // /* HeapReference<Class> */ temp = temp->component_type_
+ __ movl(temp, Address(temp, component_offset));
+ // If heap poisoning is enabled, no need to unpoison `temp`
+ // nor the object reference in `register_value->klass`, as
+ // we are comparing two poisoned references.
+ __ cmpl(temp, Address(register_value, class_offset));
- // /* HeapReference<Class> */ temp = array->klass_
- __ movl(temp, Address(array, class_offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ if (instruction->StaticTypeOfArrayIsObjectArray()) {
+ __ j(kEqual, &do_put);
+ // If heap poisoning is enabled, the `temp` reference has
+ // not been unpoisoned yet; unpoison it now.
__ MaybeUnpoisonHeapReference(temp);
- // /* HeapReference<Class> */ temp = temp->component_type_
- __ movl(temp, Address(temp, component_offset));
- // If heap poisoning is enabled, no need to unpoison `temp`
- // nor the object reference in `register_value->klass`, as
- // we are comparing two poisoned references.
- __ cmpl(temp, Address(register_value, class_offset));
-
- if (instruction->StaticTypeOfArrayIsObjectArray()) {
- __ j(kEqual, &do_put);
- // If heap poisoning is enabled, the `temp` reference has
- // not been unpoisoned yet; unpoison it now.
- __ MaybeUnpoisonHeapReference(temp);
-
- // If heap poisoning is enabled, no need to unpoison the
- // heap reference loaded below, as it is only used for a
- // comparison with null.
- __ cmpl(Address(temp, super_offset), Immediate(0));
- __ j(kNotEqual, slow_path->GetEntryLabel());
- __ Bind(&do_put);
- } else {
- __ j(kNotEqual, slow_path->GetEntryLabel());
- }
+ // If heap poisoning is enabled, no need to unpoison the
+ // heap reference loaded below, as it is only used for a
+ // comparison with null.
+ __ cmpl(Address(temp, super_offset), Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(&do_put);
+ } else {
+ __ j(kNotEqual, slow_path->GetEntryLabel());
}
}
@@ -5173,7 +5080,9 @@
}
void LocationsBuilderX86_64::VisitSuspendCheck(HSuspendCheck* instruction) {
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
}
void InstructionCodeGeneratorX86_64::VisitSuspendCheck(HSuspendCheck* instruction) {
@@ -5433,17 +5342,6 @@
HLoadClass::LoadKind CodeGeneratorX86_64::GetSupportedLoadClassKind(
HLoadClass::LoadKind desired_class_load_kind) {
- if (kEmitCompilerReadBarrier) {
- switch (desired_class_load_kind) {
- case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
- case HLoadClass::LoadKind::kBootImageAddress:
- // TODO: Implement for read barrier.
- return HLoadClass::LoadKind::kDexCacheViaMethod;
- default:
- break;
- }
- }
switch (desired_class_load_kind) {
case HLoadClass::LoadKind::kReferrersClass:
break;
@@ -5479,10 +5377,15 @@
return;
}
- LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || kEmitCompilerReadBarrier)
+ const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
+ LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
+
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
@@ -5495,10 +5398,7 @@
LocationSummary* locations = cls->GetLocations();
if (cls->NeedsAccessCheck()) {
codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex());
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
- cls,
- cls->GetDexPc(),
- nullptr);
+ codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
return;
}
@@ -5506,6 +5406,7 @@
Location out_loc = locations->Out();
CpuRegister out = out_loc.AsRegister<CpuRegister>();
+ const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
bool generate_null_check = false;
switch (cls->GetLoadKind()) {
case HLoadClass::LoadKind::kReferrersClass: {
@@ -5514,16 +5415,20 @@
// /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
CpuRegister current_method = locations->InAt(0).AsRegister<CpuRegister>();
GenerateGcRootFieldLoad(
- cls, out_loc, Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
+ cls,
+ out_loc,
+ Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()),
+ /*fixup_label*/nullptr,
+ requires_read_barrier);
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(!requires_read_barrier);
__ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
codegen_->RecordTypePatch(cls);
break;
case HLoadClass::LoadKind::kBootImageAddress: {
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(!requires_read_barrier);
DCHECK_NE(cls->GetAddress(), 0u);
uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
__ movl(out, Immediate(address)); // Zero-extended.
@@ -5535,11 +5440,19 @@
// /* GcRoot<mirror::Class> */ out = *address
if (IsUint<32>(cls->GetAddress())) {
Address address = Address::Absolute(cls->GetAddress(), /* no_rip */ true);
- GenerateGcRootFieldLoad(cls, out_loc, address);
+ GenerateGcRootFieldLoad(cls,
+ out_loc,
+ address,
+ /*fixup_label*/nullptr,
+ requires_read_barrier);
} else {
// TODO: Consider using opcode A1, i.e. movl eax, moff32 (with 64-bit address).
__ movq(out, Immediate(cls->GetAddress()));
- GenerateGcRootFieldLoad(cls, out_loc, Address(out, 0));
+ GenerateGcRootFieldLoad(cls,
+ out_loc,
+ Address(out, 0),
+ /*fixup_label*/nullptr,
+ requires_read_barrier);
}
generate_null_check = !cls->IsInDexCache();
break;
@@ -5550,7 +5463,7 @@
Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
/* no_rip */ false);
// /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
- GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label);
+ GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, requires_read_barrier);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -5563,7 +5476,11 @@
ArtMethod::DexCacheResolvedTypesOffset(kX86_64PointerSize).Int32Value()));
// /* GcRoot<mirror::Class> */ out = out[type_index]
GenerateGcRootFieldLoad(
- cls, out_loc, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
+ cls,
+ out_loc,
+ Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())),
+ /*fixup_label*/nullptr,
+ requires_read_barrier);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -5647,6 +5564,10 @@
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
+ if (kUseBakerReadBarrier && !load->NeedsEnvironment()) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
+
if (load->GetLoadKind() == HLoadString::LoadKind::kDexCacheViaMethod) {
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -5715,10 +5636,7 @@
}
void InstructionCodeGeneratorX86_64::VisitThrow(HThrow* instruction) {
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException),
- instruction,
- instruction->GetDexPc(),
- nullptr);
+ codegen_->InvokeRuntime(kQuickDeliverException, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}
@@ -5733,6 +5651,7 @@
void LocationsBuilderX86_64::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
+ bool baker_read_barrier_slow_path = false;
switch (type_check_kind) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
@@ -5740,6 +5659,7 @@
case TypeCheckKind::kArrayObjectCheck:
call_kind =
kEmitCompilerReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
+ baker_read_barrier_slow_path = kUseBakerReadBarrier;
break;
case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
@@ -5749,6 +5669,9 @@
}
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ if (baker_read_barrier_slow_path) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::Any());
// Note that TypeCheckSlowPathX86_64 uses this "out" register too.
@@ -5951,6 +5874,7 @@
LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
+ bool baker_read_barrier_slow_path = false;
switch (type_check_kind) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
@@ -5959,6 +5883,7 @@
call_kind = (throws_into_catch || kEmitCompilerReadBarrier) ?
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
+ baker_read_barrier_slow_path = kUseBakerReadBarrier && !throws_into_catch;
break;
case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
@@ -5967,6 +5892,9 @@
break;
}
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ if (baker_read_barrier_slow_path) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::Any());
// Note that TypeCheckSlowPathX86_64 uses this "temp" register too.
@@ -6216,11 +6144,9 @@
}
void InstructionCodeGeneratorX86_64::VisitMonitorOperation(HMonitorOperation* instruction) {
- codegen_->InvokeRuntime(instruction->IsEnter() ? QUICK_ENTRY_POINT(pLockObject)
- : QUICK_ENTRY_POINT(pUnlockObject),
+ codegen_->InvokeRuntime(instruction->IsEnter() ? kQuickLockObject : kQuickUnlockObject,
instruction,
- instruction->GetDexPc(),
- nullptr);
+ instruction->GetDexPc());
if (instruction->IsEnter()) {
CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
} else {
@@ -6402,9 +6328,11 @@
void InstructionCodeGeneratorX86_64::GenerateGcRootFieldLoad(HInstruction* instruction,
Location root,
const Address& address,
- Label* fixup_label) {
+ Label* fixup_label,
+ bool requires_read_barrier) {
CpuRegister root_reg = root.AsRegister<CpuRegister>();
- if (kEmitCompilerReadBarrier) {
+ if (requires_read_barrier) {
+ DCHECK(kEmitCompilerReadBarrier);
if (kUseBakerReadBarrier) {
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
      // Baker's read barriers are used:
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 44844ac..d939083 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -257,7 +257,8 @@
void GenerateGcRootFieldLoad(HInstruction* instruction,
Location root,
const Address& address,
- Label* fixup_label = nullptr);
+ Label* fixup_label = nullptr,
+ bool requires_read_barrier = kEmitCompilerReadBarrier);
void PushOntoFPStack(Location source, uint32_t temp_offset,
uint32_t stack_adjustment, bool is_float);
@@ -309,12 +310,7 @@
void InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
- SlowPathCode* slow_path) OVERRIDE;
-
- void InvokeRuntime(int32_t entry_point_offset,
- HInstruction* instruction,
- uint32_t dex_pc,
- SlowPathCode* slow_path);
+ SlowPathCode* slow_path = nullptr) OVERRIDE;
// Generate code to invoke a runtime entry point, but do not record
// PC-related information in a stack map.
@@ -322,6 +318,8 @@
HInstruction* instruction,
SlowPathCode* slow_path);
+ void GenerateInvokeRuntime(int32_t entry_point_offset);
+
size_t GetWordSize() const OVERRIDE {
return kX86_64WordSize;
}
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index fe6c0a3..d00a786 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -991,4 +991,67 @@
}
}
+#ifdef ART_ENABLE_CODEGEN_mips
+TEST_F(CodegenTest, MipsClobberRA) {
+ std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
+ MipsInstructionSetFeatures::FromCppDefines());
+ if (!CanExecute(kMips) || features_mips->IsR6()) {
+ // HMipsComputeBaseMethodAddress and the NAL instruction behind it
+ // should only be generated on non-R6.
+ return;
+ }
+
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HGraph* graph = CreateGraph(&allocator);
+
+ HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(entry_block);
+ graph->SetEntryBlock(entry_block);
+ entry_block->AddInstruction(new (&allocator) HGoto());
+
+ HBasicBlock* block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(block);
+
+ HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(exit_block);
+ graph->SetExitBlock(exit_block);
+ exit_block->AddInstruction(new (&allocator) HExit());
+
+ entry_block->AddSuccessor(block);
+ block->AddSuccessor(exit_block);
+
+ // To simplify matters, don't create PC-relative HLoadClass or HLoadString.
+ // Instead, generate HMipsComputeBaseMethodAddress directly.
+ HMipsComputeBaseMethodAddress* base = new (&allocator) HMipsComputeBaseMethodAddress();
+ block->AddInstruction(base);
+ // HMipsComputeBaseMethodAddress is defined as int, so just make the
+ // compiled method return it.
+ block->AddInstruction(new (&allocator) HReturn(base));
+
+ graph->BuildDominatorTree();
+
+ mips::CodeGeneratorMIPS codegenMIPS(graph, *features_mips.get(), CompilerOptions());
+ // Since there isn't HLoadClass or HLoadString, we need to manually indicate
+ // that RA is clobbered and the method entry code should generate a stack frame
+ // and preserve RA in it. And this is what we're testing here.
+ codegenMIPS.ClobberRA();
+ // Without ClobberRA() the code would be:
+ // nal # Sets RA to point to the jr instruction below
+ // move v0, ra # and the CPU falls into an infinite loop.
+ // jr ra
+ // nop
+ // The expected code is:
+ // addiu sp, sp, -16
+ // sw ra, 12(sp)
+ // sw a0, 0(sp)
+ // nal # Sets RA to point to the lw instruction below.
+ // move v0, ra
+ // lw ra, 12(sp)
+ // jr ra
+ // addiu sp, sp, 16
+ RunCode(&codegenMIPS, graph, [](HGraph*) {}, false, 0);
+}
+#endif
+
} // namespace art
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index 0614945..5f39a49 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -47,6 +47,9 @@
private:
void VisitShift(HBinaryOperation* shift);
+ void VisitEqual(HEqual* instruction) OVERRIDE;
+ void VisitNotEqual(HNotEqual* instruction) OVERRIDE;
+
void VisitAbove(HAbove* instruction) OVERRIDE;
void VisitAboveOrEqual(HAboveOrEqual* instruction) OVERRIDE;
void VisitBelow(HBelow* instruction) OVERRIDE;
@@ -140,6 +143,30 @@
}
}
+void InstructionWithAbsorbingInputSimplifier::VisitEqual(HEqual* instruction) {
+ if ((instruction->GetLeft()->IsNullConstant() && !instruction->GetRight()->CanBeNull()) ||
+ (instruction->GetRight()->IsNullConstant() && !instruction->GetLeft()->CanBeNull())) {
+ // Replace code looking like
+ // EQUAL lhs, null
+ // where lhs cannot be null with
+ // CONSTANT false
+ instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimBoolean, 0));
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ }
+}
+
+void InstructionWithAbsorbingInputSimplifier::VisitNotEqual(HNotEqual* instruction) {
+ if ((instruction->GetLeft()->IsNullConstant() && !instruction->GetRight()->CanBeNull()) ||
+ (instruction->GetRight()->IsNullConstant() && !instruction->GetLeft()->CanBeNull())) {
+ // Replace code looking like
+ // NOT_EQUAL lhs, null
+ // where lhs cannot be null with
+ // CONSTANT true
+ instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimBoolean, 1));
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ }
+}
+
void InstructionWithAbsorbingInputSimplifier::VisitAbove(HAbove* instruction) {
if (instruction->GetLeft()->IsConstant() &&
instruction->GetLeft()->AsConstant()->IsArithmeticZero()) {
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 451aa38..ce53134 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -109,7 +109,7 @@
}
static bool IsMethodOrDeclaringClassFinal(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return method->IsFinal() || method->GetDeclaringClass()->IsFinal();
}
@@ -119,7 +119,7 @@
* Return nullptr if the runtime target cannot be proven.
*/
static ArtMethod* FindVirtualOrInterfaceTarget(HInvoke* invoke, ArtMethod* resolved_method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (IsMethodOrDeclaringClassFinal(resolved_method)) {
// No need to lookup further, the resolved method will be the target.
return resolved_method;
@@ -189,7 +189,7 @@
static uint32_t FindMethodIndexIn(ArtMethod* method,
const DexFile& dex_file,
uint32_t name_and_signature_index)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (IsSameDexFile(*method->GetDexFile(), dex_file)) {
return method->GetDexMethodIndex();
} else {
@@ -200,7 +200,7 @@
static uint32_t FindClassIndexIn(mirror::Class* cls,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t index = DexFile::kDexNoIndex;
if (cls->GetDexCache() == nullptr) {
DCHECK(cls->IsArrayClass()) << PrettyClass(cls);
@@ -452,7 +452,8 @@
is_referrer,
invoke_instruction->GetDexPc(),
/* needs_access_check */ false,
- /* is_in_dex_cache */ true);
+ /* is_in_dex_cache */ true,
+ /* is_in_boot_image */ false);
HNotEqual* compare = new (graph_->GetArena()) HNotEqual(load_class, receiver_class);
// TODO: Extend reference type propagation to understand the guard.
@@ -894,7 +895,7 @@
static HInstruction* GetInvokeInputForArgVRegIndex(HInvoke* invoke_instruction,
size_t arg_vreg_index)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
size_t input_index = 0;
for (size_t i = 0; i < arg_vreg_index; ++i, ++input_index) {
DCHECK_LT(input_index, invoke_instruction->GetNumberOfArguments());
@@ -1030,7 +1031,7 @@
HInstanceFieldGet* HInliner::CreateInstanceFieldGet(Handle<mirror::DexCache> dex_cache,
uint32_t field_index,
HInstruction* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
DCHECK(resolved_field != nullptr);
@@ -1058,7 +1059,7 @@
uint32_t field_index,
HInstruction* obj,
HInstruction* value)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
DCHECK(resolved_field != nullptr);
@@ -1374,7 +1375,7 @@
static bool IsReferenceTypeRefinement(ReferenceTypeInfo declared_rti,
bool declared_can_be_null,
HInstruction* actual_obj)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (declared_can_be_null && !actual_obj->CanBeNull()) {
return true;
}
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 02d3a5f..486626b 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -64,12 +64,12 @@
// reference type propagation can run after the inlining. If the inlining is successful, this
// method will replace and remove the `invoke_instruction`.
bool TryInlineAndReplace(HInvoke* invoke_instruction, ArtMethod* resolved_method, bool do_rtp)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool TryBuildAndInline(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
HInstruction** return_replacement)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool TryBuildAndInlineHelper(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
@@ -86,7 +86,7 @@
bool TryPatternSubstitution(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
HInstruction** return_replacement)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Create a new HInstanceFieldGet.
HInstanceFieldGet* CreateInstanceFieldGet(Handle<mirror::DexCache> dex_cache,
@@ -105,38 +105,38 @@
bool TryInlineMonomorphicCall(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
const InlineCache& ic)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Try to inline targets of a polymorphic call.
bool TryInlinePolymorphicCall(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
const InlineCache& ic)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
const InlineCache& ic)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
HInstanceFieldGet* BuildGetReceiverClass(ClassLinker* class_linker,
HInstruction* receiver,
uint32_t dex_pc) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void FixUpReturnReferenceType(ArtMethod* resolved_method, HInstruction* return_replacement)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Creates an instance of ReferenceTypeInfo from `klass` if `klass` is
// admissible (see ReferenceTypePropagation::IsAdmissible for details).
// Otherwise returns inexact Object RTI.
- ReferenceTypeInfo GetClassRTI(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ ReferenceTypeInfo GetClassRTI(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
bool ArgumentTypesMoreSpecific(HInvoke* invoke_instruction, ArtMethod* resolved_method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool ReturnTypeMoreSpecific(HInvoke* invoke_instruction, HInstruction* return_replacement)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Add a type guard on the given `receiver`. This will add to the graph:
// i0 = HFieldGet(receiver, klass)
@@ -154,7 +154,7 @@
bool is_referrer,
HInstruction* invoke_instruction,
bool with_deoptimization)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Ad-hoc implementation for implementing a diamond pattern in the graph for
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index e5dab56..d7e4c53 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -935,7 +935,8 @@
IsOutermostCompilingClass(type_index),
dex_pc,
needs_access_check,
- /* is_in_dex_cache */ false);
+ /* is_in_dex_cache */ false,
+ /* is_in_boot_image */ false);
AppendInstruction(load_class);
HInstruction* cls = load_class;
@@ -957,7 +958,7 @@
}
static bool IsSubClass(mirror::Class* to_test, mirror::Class* super_class)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return to_test != nullptr && !to_test->IsInterface() && to_test->IsSubClass(super_class);
}
@@ -1026,7 +1027,8 @@
is_outer_class,
dex_pc,
/*needs_access_check*/ false,
- /* is_in_dex_cache */ false);
+ /* is_in_dex_cache */ false,
+ /* is_in_boot_image */ false);
AppendInstruction(load_class);
clinit_check = new (arena_) HClinitCheck(load_class, dex_pc);
AppendInstruction(clinit_check);
@@ -1384,7 +1386,8 @@
is_outer_class,
dex_pc,
/*needs_access_check*/ false,
- /* is_in_dex_cache */ false);
+ /* is_in_dex_cache */ false,
+ /* is_in_boot_image */ false);
AppendInstruction(constant);
HInstruction* cls = constant;
@@ -1545,8 +1548,6 @@
void HInstructionBuilder::BuildFillArrayData(const Instruction& instruction, uint32_t dex_pc) {
HInstruction* array = LoadNullCheckedLocal(instruction.VRegA_31t(), dex_pc);
- HInstruction* length = new (arena_) HArrayLength(array, dex_pc);
- AppendInstruction(length);
int32_t payload_offset = instruction.VRegB_31t() + dex_pc;
const Instruction::ArrayDataPayload* payload =
@@ -1554,6 +1555,14 @@
const uint8_t* data = payload->data;
uint32_t element_count = payload->element_count;
+ if (element_count == 0u) {
+ // For empty payload we emit only the null check above.
+ return;
+ }
+
+ HInstruction* length = new (arena_) HArrayLength(array, dex_pc);
+ AppendInstruction(length);
+
// Implementation of this DEX instruction seems to be that the bounds check is
// done before doing any stores.
HInstruction* last_index = graph_->GetIntConstant(payload->element_count - 1, dex_pc);
@@ -1607,7 +1616,7 @@
}
static TypeCheckKind ComputeTypeCheckKind(Handle<mirror::Class> cls)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (cls.Get() == nullptr) {
return TypeCheckKind::kUnresolvedCheck;
} else if (cls->IsInterface()) {
@@ -1653,7 +1662,8 @@
IsOutermostCompilingClass(type_index),
dex_pc,
!can_access,
- /* is_in_dex_cache */ false);
+ /* is_in_dex_cache */ false,
+ /* is_in_boot_image */ false);
AppendInstruction(cls);
TypeCheckKind check_kind = ComputeTypeCheckKind(resolved_class);
@@ -2628,7 +2638,8 @@
IsOutermostCompilingClass(type_index),
dex_pc,
!can_access,
- /* is_in_dex_cache */ false));
+ /* is_in_dex_cache */ false,
+ /* is_in_boot_image */ false));
UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
break;
}
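The fill-array-data hunks above now skip the HArrayLength and bounds check entirely when the payload is empty. A small, self-contained sketch of that emission decision (the Payload struct and the printf "emissions" are illustrative stand-ins, not the ART builder types):

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the decoded array-data payload.
struct Payload { uint32_t element_count; };

void EmitFillArrayData(const Payload& payload) {
  printf("emit: null check on array\n");            // always emitted
  if (payload.element_count == 0u) {
    // Empty payload: no HArrayLength, no bounds check, no stores.
    return;
  }
  printf("emit: HArrayLength\n");
  // The DEX instruction checks bounds before performing any store, so the
  // check is against the last index up front.
  printf("emit: bounds check at index %u\n", payload.element_count - 1u);
  printf("emit: %u element stores\n", payload.element_count);
}

int main() {
  EmitFillArrayData(Payload{0u});   // only the null check
  EmitFillArrayData(Payload{4u});   // full sequence
  return 0;
}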
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index 517cf76..aa34ddd 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -103,7 +103,7 @@
bool NeedsAccessCheck(uint32_t type_index,
Handle<mirror::DexCache> dex_cache,
/*out*/bool* finalizable) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool NeedsAccessCheck(uint32_t type_index, /*out*/bool* finalizable) const;
template<typename T>
@@ -255,14 +255,14 @@
ArtMethod* method,
uint32_t method_idx,
HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Build a HNewInstance instruction.
bool BuildNewInstance(uint16_t type_index, uint32_t dex_pc);
// Return whether the compiler can assume `cls` is initialized.
bool IsInitialized(Handle<mirror::Class> cls) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Try to resolve a method using the class linker. Return null if a method could
// not be resolved.
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index a1fc7c3..67640a1 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -656,6 +656,9 @@
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall,
kIntrinsified);
+ if (can_call && kUseBakerReadBarrier) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 7482057..082076d 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -894,6 +894,9 @@
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall,
kIntrinsified);
+ if (can_call && kUseBakerReadBarrier) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 6e5eb66..5239f8f 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -634,7 +634,7 @@
// For 64-bit quantities, this algorithm gets executed twice (once
// for in_lo, and again for in_hi), but saves a few instructions
// because the mask values only have to be loaded once. Using this
- // algorithm the count for a 64-bit operand can be performed in 33
+ // algorithm the count for a 64-bit operand can be performed in 29
// instructions compared to a loop-based algorithm which required 47
// instructions.
@@ -687,37 +687,36 @@
__ Srl(tmp_lo, tmp_lo, 2);
__ And(tmp_lo, tmp_lo, AT);
__ Addu(tmp_lo, out_lo, tmp_lo);
- __ Srl(out_lo, tmp_lo, 4);
- __ Addu(out_lo, out_lo, tmp_lo);
__ And(out_hi, tmp_hi, AT);
__ Srl(tmp_hi, tmp_hi, 2);
__ And(tmp_hi, tmp_hi, AT);
__ Addu(tmp_hi, out_hi, tmp_hi);
- __ Srl(out_hi, tmp_hi, 4);
- __ Addu(out_hi, out_hi, tmp_hi);
+ // Here we deviate from the original algorithm a bit. We've reached
+ // the stage where the bitfields holding the subtotals are large
+ // enough to hold the combined subtotals for both the low and the
+ // high word. This means that we can add the subtotals for the high
+ // and low words into a single word and compute the final result for
+ // both words with fewer instructions.
__ LoadConst32(AT, 0x0F0F0F0F);
- __ And(out_lo, out_lo, AT);
- __ And(out_hi, out_hi, AT);
+ __ Addu(TMP, tmp_hi, tmp_lo);
+
+ __ Srl(out, TMP, 4);
+ __ And(out, out, AT);
+ __ And(TMP, TMP, AT);
+ __ Addu(out, out, TMP);
__ LoadConst32(AT, 0x01010101);
if (isR6) {
- __ MulR6(out_lo, out_lo, AT);
-
- __ MulR6(out_hi, out_hi, AT);
+ __ MulR6(out, out, AT);
} else {
- __ MulR2(out_lo, out_lo, AT);
-
- __ MulR2(out_hi, out_hi, AT);
+ __ MulR2(out, out, AT);
}
- __ Srl(out_lo, out_lo, 24);
- __ Srl(out_hi, out_hi, 24);
-
- __ Addu(out, out_hi, out_lo);
+ __ Srl(out, out, 24);
}
}
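The comment above describes a bit-parallel (SWAR) population count. A plain C++ sketch of the same idea for a 64-bit value processed as two 32-bit halves is given below; it shows the point where the low- and high-word subtotals can be merged into one word before the final multiply, using the same masks as the generated code. This is an illustration of the algorithm, not the MIPS code path itself.

#include <cassert>
#include <cstdint>

// Bit-parallel popcount of a 64-bit value using only 32-bit operations,
// merging the two halves once the 4-bit subtotal fields are wide enough.
static uint32_t PopCount64Via32(uint64_t value) {
  uint32_t lo = static_cast<uint32_t>(value);
  uint32_t hi = static_cast<uint32_t>(value >> 32);
  auto step = [](uint32_t v) {
    v = v - ((v >> 1) & 0x55555555u);                    // 2-bit subtotals
    v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);    // 4-bit subtotals
    return v;
  };
  lo = step(lo);
  hi = step(hi);
  // Each 4-bit field now holds at most 4, so adding the two words keeps
  // every field <= 8 and one combined word can finish the reduction.
  uint32_t sum = lo + hi;
  sum = (sum & 0x0F0F0F0Fu) + ((sum >> 4) & 0x0F0F0F0Fu);  // 8-bit subtotals
  return (sum * 0x01010101u) >> 24;                        // horizontal add
}

int main() {
  assert(PopCount64Via32(0u) == 0u);
  assert(PopCount64Via32(~0ull) == 64u);
  assert(PopCount64Via32(0xF0F0F0F0F0F0F0F0ull) == 32u);
  return 0;
}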
@@ -1875,7 +1874,7 @@
// int java.lang.String.compareTo(String anotherString)
void IntrinsicLocationsBuilderMIPS::VisitStringCompareTo(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -1895,13 +1894,7 @@
SlowPathCodeMIPS* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS(invoke);
codegen_->AddSlowPath(slow_path);
__ Beqz(argument, slow_path->GetEntryLabel());
-
- __ LoadFromOffset(kLoadWord,
- T9,
- TR,
- QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pStringCompareTo).Int32Value());
- __ Jalr(T9);
- __ Nop();
+ codegen_->InvokeRuntime(kQuickStringCompareTo, invoke, invoke->GetDexPc(), slow_path);
__ Bind(slow_path->GetExitLabel());
}
@@ -2055,13 +2048,7 @@
__ Clear(tmp_reg);
}
- __ LoadFromOffset(kLoadWord,
- T9,
- TR,
- QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pIndexOf).Int32Value());
- __ Jalr(T9);
- __ Nop();
-
+ codegen->InvokeRuntime(kQuickIndexOf, invoke, invoke->GetDexPc(), slow_path);
if (slow_path != nullptr) {
__ Bind(slow_path->GetExitLabel());
}
@@ -2140,14 +2127,7 @@
SlowPathCodeMIPS* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS(invoke);
codegen_->AddSlowPath(slow_path);
__ Beqz(byte_array, slow_path->GetEntryLabel());
-
- __ LoadFromOffset(kLoadWord,
- T9,
- TR,
- QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pAllocStringFromBytes).Int32Value());
- __ Jalr(T9);
- __ Nop();
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ codegen_->InvokeRuntime(kQuickAllocStringFromBytes, invoke, invoke->GetDexPc(), slow_path);
__ Bind(slow_path->GetExitLabel());
}
@@ -2165,22 +2145,13 @@
}
void IntrinsicCodeGeneratorMIPS::VisitStringNewStringFromChars(HInvoke* invoke) {
- MipsAssembler* assembler = GetAssembler();
-
// No need to emit code checking whether `locations->InAt(2)` is a null
// pointer, as callers of the native method
//
// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
//
// all include a null check on `data` before calling that method.
-
- __ LoadFromOffset(kLoadWord,
- T9,
- TR,
- QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pAllocStringFromChars).Int32Value());
- __ Jalr(T9);
- __ Nop();
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ codegen_->InvokeRuntime(kQuickAllocStringFromChars, invoke, invoke->GetDexPc());
}
// java.lang.StringFactory.newStringFromString(String toCopy)
@@ -2202,14 +2173,7 @@
SlowPathCodeMIPS* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS(invoke);
codegen_->AddSlowPath(slow_path);
__ Beqz(string_to_copy, slow_path->GetEntryLabel());
-
- __ LoadFromOffset(kLoadWord,
- T9,
- TR,
- QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pAllocStringFromString).Int32Value());
- __ Jalr(T9);
- __ Nop();
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ codegen_->InvokeRuntime(kQuickAllocStringFromString, invoke, invoke->GetDexPc());
__ Bind(slow_path->GetExitLabel());
}
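The intrinsics_mips.cc hunks above, and the mips64/x86/x86_64 hunks below, all replace a hand-rolled entrypoint call (load the entrypoint from the thread register, jump, fill the delay slot, record PC info) with a single codegen->InvokeRuntime(...) call. A rough, self-contained sketch of why centralizing this helps; the types and names here are illustrative stand-ins, not the ART API:

#include <cstdio>

// Illustrative stand-ins only.
enum QuickEntrypoint { kQuickStringCompareTo, kQuickIndexOf };
struct SlowPath { const char* name; };

struct CodeGeneratorSketch {
  // One place that emits the call sequence and records the safepoint,
  // instead of each intrinsic repeating LoadFromOffset + Jalr + Nop +
  // RecordPcInfo by hand (and occasionally forgetting a step).
  void InvokeRuntime(QuickEntrypoint entry, unsigned dex_pc, SlowPath* slow_path = nullptr) {
    printf("load entrypoint %d from thread register\n", static_cast<int>(entry));
    printf("emit call + delay slot\n");
    printf("record pc info for dex_pc=%u%s\n", dex_pc,
           slow_path != nullptr ? " (covered by slow path)" : "");
  }
};

int main() {
  CodeGeneratorSketch codegen;
  SlowPath null_check_path{"StringCompareTo null argument"};
  codegen.InvokeRuntime(kQuickStringCompareTo, /* dex_pc */ 42u, &null_check_path);
  codegen.InvokeRuntime(kQuickIndexOf, /* dex_pc */ 57u);
  return 0;
}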
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 1e18540..be8eb51 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1519,7 +1519,7 @@
// int java.lang.String.compareTo(String anotherString)
void IntrinsicLocationsBuilderMIPS64::VisitStringCompareTo(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -1540,12 +1540,7 @@
codegen_->AddSlowPath(slow_path);
__ Beqzc(argument, slow_path->GetEntryLabel());
- __ LoadFromOffset(kLoadDoubleword,
- T9,
- TR,
- QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, pStringCompareTo).Int32Value());
- __ Jalr(T9);
- __ Nop();
+ codegen_->InvokeRuntime(kQuickStringCompareTo, invoke, invoke->GetDexPc(), slow_path);
__ Bind(slow_path->GetExitLabel());
}
@@ -1691,13 +1686,8 @@
__ Clear(tmp_reg);
}
- __ LoadFromOffset(kLoadDoubleword,
- T9,
- TR,
- QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, pIndexOf).Int32Value());
+ codegen->InvokeRuntime(kQuickIndexOf, invoke, invoke->GetDexPc(), slow_path);
CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>();
- __ Jalr(T9);
- __ Nop();
if (slow_path != nullptr) {
__ Bind(slow_path->GetExitLabel());
@@ -1768,15 +1758,8 @@
codegen_->AddSlowPath(slow_path);
__ Beqzc(byte_array, slow_path->GetEntryLabel());
- __ LoadFromOffset(kLoadDoubleword,
- T9,
- TR,
- QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize,
- pAllocStringFromBytes).Int32Value());
+ codegen_->InvokeRuntime(kQuickAllocStringFromBytes, invoke, invoke->GetDexPc(), slow_path);
CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
- __ Jalr(T9);
- __ Nop();
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
__ Bind(slow_path->GetExitLabel());
}
@@ -1794,23 +1777,14 @@
}
void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromChars(HInvoke* invoke) {
- Mips64Assembler* assembler = GetAssembler();
-
// No need to emit code checking whether `locations->InAt(2)` is a null
// pointer, as callers of the native method
//
// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
//
// all include a null check on `data` before calling that method.
- __ LoadFromOffset(kLoadDoubleword,
- T9,
- TR,
- QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize,
- pAllocStringFromChars).Int32Value());
+ codegen_->InvokeRuntime(kQuickAllocStringFromChars, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
- __ Jalr(T9);
- __ Nop();
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
// java.lang.StringFactory.newStringFromString(String toCopy)
@@ -1833,15 +1807,8 @@
codegen_->AddSlowPath(slow_path);
__ Beqzc(string_to_copy, slow_path->GetEntryLabel());
- __ LoadFromOffset(kLoadDoubleword,
- T9,
- TR,
- QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize,
- pAllocStringFromString).Int32Value());
+ codegen_->InvokeRuntime(kQuickAllocStringFromString, invoke, invoke->GetDexPc(), slow_path);
CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
- __ Jalr(T9);
- __ Nop();
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
__ Bind(slow_path->GetExitLabel());
}
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index cf4a040..d17f85e 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -963,7 +963,7 @@
}
// Now do the actual call.
- __ fs()->call(Address::Absolute(GetThreadOffset<kX86PointerSize>(entry)));
+ codegen->InvokeRuntime(entry, invoke, invoke->GetDexPc());
// Extract the return value from the FP stack.
__ fstpl(Address(ESP, 0));
@@ -972,8 +972,6 @@
// And clean up the stack.
__ addl(ESP, Immediate(16));
__ cfi().AdjustCFAOffset(-16);
-
- codegen->RecordPcInfo(invoke, invoke->GetDexPc());
}
void IntrinsicLocationsBuilderX86::VisitMathCos(HInvoke* invoke) {
@@ -1343,7 +1341,7 @@
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pStringCompareTo)));
+ codegen_->InvokeRuntime(kQuickStringCompareTo, invoke, invoke->GetDexPc(), slow_path);
__ Bind(slow_path->GetExitLabel());
}
@@ -1616,9 +1614,8 @@
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pAllocStringFromBytes)));
+ codegen_->InvokeRuntime(kQuickAllocStringFromBytes, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
__ Bind(slow_path->GetExitLabel());
}
@@ -1634,17 +1631,14 @@
}
void IntrinsicCodeGeneratorX86::VisitStringNewStringFromChars(HInvoke* invoke) {
- X86Assembler* assembler = GetAssembler();
-
// No need to emit code checking whether `locations->InAt(2)` is a null
// pointer, as callers of the native method
//
// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
//
// all include a null check on `data` before calling that method.
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pAllocStringFromChars)));
+ codegen_->InvokeRuntime(kQuickAllocStringFromChars, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
void IntrinsicLocationsBuilderX86::VisitStringNewStringFromString(HInvoke* invoke) {
@@ -1666,10 +1660,8 @@
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
- __ fs()->call(
- Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pAllocStringFromString)));
+ codegen_->InvokeRuntime(kQuickAllocStringFromString, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
__ Bind(slow_path->GetExitLabel());
}
@@ -1984,6 +1976,9 @@
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall,
kIntrinsified);
+ if (can_call && kUseBakerReadBarrier) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index a4ee546..f8f30d9 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -766,10 +766,8 @@
LocationSummary* locations = invoke->GetLocations();
DCHECK(locations->WillCall());
DCHECK(invoke->IsInvokeStaticOrDirect());
- X86_64Assembler* assembler = codegen->GetAssembler();
- __ gs()->call(Address::Absolute(GetThreadOffset<kX86_64PointerSize>(entry), true));
- codegen->RecordPcInfo(invoke, invoke->GetDexPc());
+ codegen->InvokeRuntime(entry, invoke, invoke->GetDexPc());
}
void IntrinsicLocationsBuilderX86_64::VisitMathCos(HInvoke* invoke) {
@@ -1510,8 +1508,7 @@
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, pStringCompareTo),
- /* no_rip */ true));
+ codegen_->InvokeRuntime(kQuickStringCompareTo, invoke, invoke->GetDexPc(), slow_path);
__ Bind(slow_path->GetExitLabel());
}
@@ -1783,11 +1780,8 @@
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize,
- pAllocStringFromBytes),
- /* no_rip */ true));
+ codegen_->InvokeRuntime(kQuickAllocStringFromBytes, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
__ Bind(slow_path->GetExitLabel());
}
@@ -1803,19 +1797,14 @@
}
void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromChars(HInvoke* invoke) {
- X86_64Assembler* assembler = GetAssembler();
-
// No need to emit code checking whether `locations->InAt(2)` is a null
// pointer, as callers of the native method
//
// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
//
// all include a null check on `data` before calling that method.
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize,
- pAllocStringFromChars),
- /* no_rip */ true));
+ codegen_->InvokeRuntime(kQuickAllocStringFromChars, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromString(HInvoke* invoke) {
@@ -1837,11 +1826,8 @@
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize,
- pAllocStringFromString),
- /* no_rip */ true));
+ codegen_->InvokeRuntime(kQuickAllocStringFromString, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
__ Bind(slow_path->GetExitLabel());
}
@@ -2123,6 +2109,9 @@
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall,
kIntrinsified);
+ if (can_call && kUseBakerReadBarrier) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ }
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
diff --git a/compiler/optimizing/locations.cc b/compiler/optimizing/locations.cc
index 83596da..1b1b3a7 100644
--- a/compiler/optimizing/locations.cc
+++ b/compiler/optimizing/locations.cc
@@ -27,12 +27,14 @@
: inputs_(instruction->InputCount(),
instruction->GetBlock()->GetGraph()->GetArena()->Adapter(kArenaAllocLocationSummary)),
temps_(instruction->GetBlock()->GetGraph()->GetArena()->Adapter(kArenaAllocLocationSummary)),
- output_overlaps_(Location::kOutputOverlap),
call_kind_(call_kind),
+ intrinsified_(intrinsified),
+ has_custom_slow_path_calling_convention_(false),
+ output_overlaps_(Location::kOutputOverlap),
stack_mask_(nullptr),
register_mask_(0),
live_registers_(),
- intrinsified_(intrinsified) {
+ custom_slow_path_caller_saves_() {
instruction->SetLocations(this);
if (NeedsSafepoint()) {
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 5fdfb9b..4384042 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -20,6 +20,7 @@
#include "base/arena_containers.h"
#include "base/arena_object.h"
#include "base/bit_field.h"
+#include "base/bit_utils.h"
#include "base/bit_vector.h"
#include "base/value_object.h"
@@ -452,7 +453,7 @@
}
size_t GetNumberOfRegisters() const {
- return __builtin_popcount(core_registers_) + __builtin_popcount(floating_point_registers_);
+ return POPCOUNT(core_registers_) + POPCOUNT(floating_point_registers_);
}
uint32_t GetCoreRegisters() const {
@@ -466,8 +467,6 @@
private:
uint32_t core_registers_;
uint32_t floating_point_registers_;
-
- DISALLOW_COPY_AND_ASSIGN(RegisterSet);
};
static constexpr bool kIntrinsified = true;
@@ -569,6 +568,21 @@
return CanCall();
}
+ void SetCustomSlowPathCallerSaves(const RegisterSet& caller_saves) {
+ DCHECK(OnlyCallsOnSlowPath());
+ has_custom_slow_path_calling_convention_ = true;
+ custom_slow_path_caller_saves_ = caller_saves;
+ }
+
+ bool HasCustomSlowPathCallingConvention() const {
+ return has_custom_slow_path_calling_convention_;
+ }
+
+ const RegisterSet& GetCustomSlowPathCallerSaves() const {
+ DCHECK(HasCustomSlowPathCallingConvention());
+ return custom_slow_path_caller_saves_;
+ }
+
void SetStackBit(uint32_t index) {
stack_mask_->SetBit(index);
}
@@ -628,18 +642,18 @@
return intrinsified_;
}
- void SetIntrinsified(bool intrinsified) {
- intrinsified_ = intrinsified;
- }
-
private:
ArenaVector<Location> inputs_;
ArenaVector<Location> temps_;
+ const CallKind call_kind_;
+ // Whether these are locations for an intrinsified call.
+ const bool intrinsified_;
+ // Whether the slow path has default or custom calling convention.
+ bool has_custom_slow_path_calling_convention_;
// Whether the output overlaps with any of the inputs. If it overlaps, then it cannot
// share the same register as the inputs.
Location::OutputOverlap output_overlaps_;
Location output_;
- const CallKind call_kind_;
// Mask of objects that live in the stack.
BitVector* stack_mask_;
@@ -650,8 +664,8 @@
// Registers that are in use at this position.
RegisterSet live_registers_;
- // Whether these are locations for an intrinsified call.
- bool intrinsified_;
+ // Custom slow path caller saves. Valid only if has_custom_slow_path_calling_convention_ is set.
+ RegisterSet custom_slow_path_caller_saves_;
friend class RegisterAllocatorTest;
DISALLOW_COPY_AND_ASSIGN(LocationSummary);
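The new SetCustomSlowPathCallerSaves/HasCustomSlowPathCallingConvention pair lets an intrinsic declare that its read-barrier slow path needs no caller-save registers preserved (the RegisterSet() calls added in the intrinsics files above). A simplified sketch of how a code generator could consume the flag when deciding what to spill around a slow path; this is illustrative, not the ART implementation:

#include <cstdint>
#include <cstdio>

// Minimal stand-ins for the LocationSummary API touched by this change.
struct RegisterSetSketch {
  uint32_t core = 0u;
  uint32_t fp = 0u;
};

struct LocationSummarySketch {
  bool has_custom_slow_path_calling_convention = false;
  RegisterSetSketch custom_slow_path_caller_saves;

  void SetCustomSlowPathCallerSaves(const RegisterSetSketch& caller_saves) {
    has_custom_slow_path_calling_convention = true;
    custom_slow_path_caller_saves = caller_saves;
  }
};

// With a custom convention the slow path only preserves the registers named
// in the summary (possibly none) instead of the full caller-save set.
uint32_t CoreRegistersToSpill(const LocationSummarySketch& locations,
                              uint32_t default_caller_saves) {
  return locations.has_custom_slow_path_calling_convention
             ? locations.custom_slow_path_caller_saves.core
             : default_caller_saves;
}

int main() {
  LocationSummarySketch cas_locations;
  cas_locations.SetCustomSlowPathCallerSaves(RegisterSetSketch{});  // no caller saves
  printf("custom: 0x%x, default: 0x%x\n",
         CoreRegistersToSpill(cas_locations, 0x000Fu),
         CoreRegistersToSpill(LocationSummarySketch{}, 0x000Fu));
  return 0;
}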
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 2808e1b..8f37236 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2242,7 +2242,7 @@
}
static void CheckAgainstUpperBound(ReferenceTypeInfo rti, ReferenceTypeInfo upper_bound_rti)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (rti.IsValid()) {
DCHECK(upper_bound_rti.IsSupertypeOf(rti))
<< " upper_bound_rti: " << upper_bound_rti
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index dfa8276..149a71d 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -171,7 +171,7 @@
static ReferenceTypeInfo Create(TypeHandle type_handle, bool is_exact);
- static ReferenceTypeInfo Create(TypeHandle type_handle) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static ReferenceTypeInfo Create(TypeHandle type_handle) REQUIRES_SHARED(Locks::mutator_lock_) {
return Create(type_handle, type_handle->CannotBeAssignedFromOtherTypes());
}
@@ -191,49 +191,49 @@
bool IsExact() const { return is_exact_; }
- bool IsObjectClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsObjectClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
return GetTypeHandle()->IsObjectClass();
}
- bool IsStringClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsStringClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
return GetTypeHandle()->IsStringClass();
}
- bool IsObjectArray() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsObjectArray() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
return IsArrayClass() && GetTypeHandle()->GetComponentType()->IsObjectClass();
}
- bool IsInterface() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsInterface() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
return GetTypeHandle()->IsInterface();
}
- bool IsArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsArrayClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
return GetTypeHandle()->IsArrayClass();
}
- bool IsPrimitiveArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrimitiveArrayClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
return GetTypeHandle()->IsPrimitiveArray();
}
- bool IsNonPrimitiveArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsNonPrimitiveArrayClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
return GetTypeHandle()->IsArrayClass() && !GetTypeHandle()->IsPrimitiveArray();
}
- bool CanArrayHold(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool CanArrayHold(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
if (!IsExact()) return false;
if (!IsArrayClass()) return false;
return GetTypeHandle()->GetComponentType()->IsAssignableFrom(rti.GetTypeHandle().Get());
}
- bool CanArrayHoldValuesOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool CanArrayHoldValuesOf(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
if (!IsExact()) return false;
if (!IsArrayClass()) return false;
@@ -244,13 +244,13 @@
Handle<mirror::Class> GetTypeHandle() const { return type_handle_; }
- bool IsSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsSupertypeOf(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
DCHECK(rti.IsValid());
return GetTypeHandle()->IsAssignableFrom(rti.GetTypeHandle().Get());
}
- bool IsStrictSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsStrictSupertypeOf(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
DCHECK(rti.IsValid());
return GetTypeHandle().Get() != rti.GetTypeHandle().Get() &&
@@ -260,7 +260,7 @@
// Returns true if the type information provides the same amount of detail.
// Note that it does not mean that the instructions have the same actual type
// (because the type can be the result of a merge).
- bool IsEqual(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsEqual(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
if (!IsValid() && !rti.IsValid()) {
// Invalid types are equal.
return true;
@@ -5021,7 +5021,7 @@
Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
: HExpression(field_type,
- SideEffects::FieldReadOfType(field_type, is_volatile),
+ SideEffectsForArchRuntimeCalls(field_type, is_volatile),
dex_pc),
field_info_(field_offset,
field_type,
@@ -5053,6 +5053,16 @@
Primitive::Type GetFieldType() const { return field_info_.GetFieldType(); }
bool IsVolatile() const { return field_info_.IsVolatile(); }
+ static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type field_type, bool is_volatile) {
+ SideEffects side_effects = SideEffects::FieldReadOfType(field_type, is_volatile);
+
+ // MIPS delegates volatile kPrimLong and kPrimDouble loads to a runtime helper.
+ if (Primitive::Is64BitType(field_type)) {
+ side_effects.Add(SideEffects::CanTriggerGC());
+ }
+ return side_effects;
+ }
+
DECLARE_INSTRUCTION(InstanceFieldGet);
private:
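The added SideEffectsForArchRuntimeCalls helper widens the side effects of a field access when the value is 64-bit, because a 32-bit MIPS target implements volatile long/double accesses via a runtime helper that can trigger GC. A toy sketch of the same composition over a bit-mask style side-effect set (illustrative flags, not the ART SideEffects encoding):

#include <cstdint>
#include <cstdio>

// Illustrative side-effect bits only.
enum : uint32_t {
  kFieldRead    = 1u << 0,
  kCanTriggerGC = 1u << 1,
};

constexpr bool Is64BitType(int size_in_bits) { return size_in_bits == 64; }

uint32_t SideEffectsForFieldGet(int size_in_bits) {
  uint32_t effects = kFieldRead;
  // Conservatively mark 64-bit accesses as possible GC points, mirroring the
  // hunk above: on 32-bit MIPS a volatile long/double access may go through
  // a runtime helper.
  if (Is64BitType(size_in_bits)) {
    effects |= kCanTriggerGC;
  }
  return effects;
}

int main() {
  printf("int field:  0x%x\n", SideEffectsForFieldGet(32));
  printf("long field: 0x%x\n", SideEffectsForFieldGet(64));
  return 0;
}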
@@ -5073,7 +5083,7 @@
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
- : HTemplateInstruction(SideEffects::FieldWriteOfType(field_type, is_volatile),
+ : HTemplateInstruction(SideEffectsForArchRuntimeCalls(field_type, is_volatile),
dex_pc),
field_info_(field_offset,
field_type,
@@ -5099,6 +5109,16 @@
bool GetValueCanBeNull() const { return GetPackedFlag<kFlagValueCanBeNull>(); }
void ClearValueCanBeNull() { SetPackedFlag<kFlagValueCanBeNull>(false); }
+ static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type field_type, bool is_volatile) {
+ SideEffects side_effects = SideEffects::FieldWriteOfType(field_type, is_volatile);
+
+ // MIPS delegates volatile kPrimLong and kPrimDouble stores to a runtime helper.
+ if (Primitive::Is64BitType(field_type)) {
+ side_effects.Add(SideEffects::CanTriggerGC());
+ }
+ return side_effects;
+ }
+
DECLARE_INSTRUCTION(InstanceFieldSet);
private:
@@ -5441,7 +5461,8 @@
bool is_referrers_class,
uint32_t dex_pc,
bool needs_access_check,
- bool is_in_dex_cache)
+ bool is_in_dex_cache,
+ bool is_in_boot_image)
: HInstruction(SideEffectsForArchRuntimeCalls(), dex_pc),
special_input_(HUserRecord<HInstruction*>(current_method)),
type_index_(type_index),
@@ -5455,6 +5476,7 @@
is_referrers_class ? LoadKind::kReferrersClass : LoadKind::kDexCacheViaMethod);
SetPackedFlag<kFlagNeedsAccessCheck>(needs_access_check);
SetPackedFlag<kFlagIsInDexCache>(is_in_dex_cache);
+ SetPackedFlag<kFlagIsInBootImage>(is_in_boot_image);
SetPackedFlag<kFlagGenerateClInitCheck>(false);
}
@@ -5545,6 +5567,7 @@
bool IsReferrersClass() const { return GetLoadKind() == LoadKind::kReferrersClass; }
bool NeedsAccessCheck() const { return GetPackedFlag<kFlagNeedsAccessCheck>(); }
bool IsInDexCache() const { return GetPackedFlag<kFlagIsInDexCache>(); }
+ bool IsInBootImage() const { return GetPackedFlag<kFlagIsInBootImage>(); }
bool MustGenerateClinitCheck() const { return GetPackedFlag<kFlagGenerateClInitCheck>(); }
void MarkInDexCache() {
@@ -5554,6 +5577,10 @@
SetSideEffects(SideEffects::None());
}
+ void MarkInBootImage() {
+ SetPackedFlag<kFlagIsInBootImage>(true);
+ }
+
void AddSpecialInput(HInstruction* special_input);
using HInstruction::GetInputRecords; // Keep the const version visible.
@@ -5571,9 +5598,10 @@
private:
static constexpr size_t kFlagNeedsAccessCheck = kNumberOfGenericPackedBits;
static constexpr size_t kFlagIsInDexCache = kFlagNeedsAccessCheck + 1;
+ static constexpr size_t kFlagIsInBootImage = kFlagIsInDexCache + 1;
// Whether this instruction must generate the initialization check.
// Used for code generation.
- static constexpr size_t kFlagGenerateClInitCheck = kFlagIsInDexCache + 1;
+ static constexpr size_t kFlagGenerateClInitCheck = kFlagIsInBootImage + 1;
static constexpr size_t kFieldLoadKind = kFlagGenerateClInitCheck + 1;
static constexpr size_t kFieldLoadKindSize =
MinimumBitsToStore(static_cast<size_t>(LoadKind::kLast));
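The new kFlagIsInBootImage bit is slotted into HLoadClass's packed field area, shifting kFlagGenerateClInitCheck and the load-kind field up by one. A small self-contained sketch of this packing scheme, with generic names rather than the ART packed-field helpers:

#include <cstdint>
#include <cstdio>

// Chained bit indices, in the style of the HLoadClass flags above.
static constexpr size_t kFlagNeedsAccessCheck    = 0;
static constexpr size_t kFlagIsInDexCache        = kFlagNeedsAccessCheck + 1;
static constexpr size_t kFlagIsInBootImage       = kFlagIsInDexCache + 1;
static constexpr size_t kFlagGenerateClInitCheck = kFlagIsInBootImage + 1;

struct PackedFieldsSketch {
  uint32_t bits = 0u;
  void SetFlag(size_t index, bool value = true) {
    bits = value ? (bits | (1u << index)) : (bits & ~(1u << index));
  }
  bool GetFlag(size_t index) const { return ((bits >> index) & 1u) != 0u; }
};

int main() {
  PackedFieldsSketch fields;
  fields.SetFlag(kFlagIsInBootImage);  // analogous to MarkInBootImage()
  printf("in boot image: %d, clinit check: %d\n",
         fields.GetFlag(kFlagIsInBootImage),
         fields.GetFlag(kFlagGenerateClInitCheck));
  return 0;
}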
@@ -5896,7 +5924,7 @@
Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
: HExpression(field_type,
- SideEffects::FieldReadOfType(field_type, is_volatile),
+ SideEffectsForArchRuntimeCalls(field_type, is_volatile),
dex_pc),
field_info_(field_offset,
field_type,
@@ -5925,6 +5953,16 @@
Primitive::Type GetFieldType() const { return field_info_.GetFieldType(); }
bool IsVolatile() const { return field_info_.IsVolatile(); }
+ static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type field_type, bool is_volatile) {
+ SideEffects side_effects = SideEffects::FieldReadOfType(field_type, is_volatile);
+
+ // MIPS delegates volatile kPrimLong and kPrimDouble loads to a runtime helper.
+ if (Primitive::Is64BitType(field_type)) {
+ side_effects.Add(SideEffects::CanTriggerGC());
+ }
+ return side_effects;
+ }
+
DECLARE_INSTRUCTION(StaticFieldGet);
private:
@@ -5945,7 +5983,7 @@
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
- : HTemplateInstruction(SideEffects::FieldWriteOfType(field_type, is_volatile),
+ : HTemplateInstruction(SideEffectsForArchRuntimeCalls(field_type, is_volatile),
dex_pc),
field_info_(field_offset,
field_type,
@@ -5968,6 +6006,16 @@
bool GetValueCanBeNull() const { return GetPackedFlag<kFlagValueCanBeNull>(); }
void ClearValueCanBeNull() { SetPackedFlag<kFlagValueCanBeNull>(false); }
+ static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type field_type, bool is_volatile) {
+ SideEffects side_effects = SideEffects::FieldWriteOfType(field_type, is_volatile);
+
+ // MIPS delegates volatile kPrimLong and kPrimDouble stores to a runtime helper.
+ if (Primitive::Is64BitType(field_type)) {
+ side_effects.Add(SideEffects::CanTriggerGC());
+ }
+ return side_effects;
+ }
+
DECLARE_INSTRUCTION(StaticFieldSet);
private:
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index 8c0231e..a1e923b 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -19,6 +19,7 @@
#include "arch/instruction_set.h"
#include "cfi_test.h"
+#include "driver/compiler_options.h"
#include "gtest/gtest.h"
#include "optimizing/code_generator.h"
#include "optimizing/optimizing_unit_test.h"
diff --git a/compiler/optimizing/optimizing_cfi_test_expected.inc b/compiler/optimizing/optimizing_cfi_test_expected.inc
index 05eb063..6c5030c 100644
--- a/compiler/optimizing/optimizing_cfi_test_expected.inc
+++ b/compiler/optimizing/optimizing_cfi_test_expected.inc
@@ -144,12 +144,12 @@
0x34, 0x00, 0xB0, 0xAF, 0x28, 0x00, 0xB6, 0xF7, 0x20, 0x00, 0xB4, 0xF7,
0x00, 0x00, 0xA4, 0xAF, 0x3C, 0x00, 0xBF, 0x8F, 0x38, 0x00, 0xB1, 0x8F,
0x34, 0x00, 0xB0, 0x8F, 0x28, 0x00, 0xB6, 0xD7, 0x20, 0x00, 0xB4, 0xD7,
- 0x40, 0x00, 0xBD, 0x27, 0x09, 0x00, 0xE0, 0x03, 0x00, 0x00, 0x00, 0x00,
+ 0x09, 0x00, 0xE0, 0x03, 0x40, 0x00, 0xBD, 0x27,
};
static constexpr uint8_t expected_cfi_kMips[] = {
0x44, 0x0E, 0x40, 0x44, 0x9F, 0x01, 0x44, 0x91, 0x02, 0x44, 0x90, 0x03,
- 0x4C, 0x0A, 0x44, 0xDF, 0x44, 0xD1, 0x44, 0xD0, 0x4C, 0x0E, 0x00, 0x48,
- 0x0B, 0x0E, 0x40,
+ 0x4C, 0x0A, 0x44, 0xDF, 0x44, 0xD1, 0x44, 0xD0, 0x50, 0x0E, 0x00, 0x0B,
+ 0x0E, 0x40,
};
// 0x00000000: addiu r29, r29, -64
// 0x00000004: .cfi_def_cfa_offset: 64
@@ -171,12 +171,11 @@
// 0x00000028: .cfi_restore: r16
// 0x00000028: ldc1 f22, +40(r29)
// 0x0000002c: ldc1 f20, +32(r29)
-// 0x00000030: addiu r29, r29, 64
-// 0x00000034: .cfi_def_cfa_offset: 0
-// 0x00000034: jr r31
-// 0x00000038: nop
-// 0x0000003c: .cfi_restore_state
-// 0x0000003c: .cfi_def_cfa_offset: 64
+// 0x00000030: jr r31
+// 0x00000034: addiu r29, r29, 64
+// 0x00000038: .cfi_def_cfa_offset: 0
+// 0x00000038: .cfi_restore_state
+// 0x00000038: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kMips64[] = {
0xD8, 0xFF, 0xBD, 0x67, 0x20, 0x00, 0xBF, 0xFF, 0x18, 0x00, 0xB1, 0xFF,
@@ -348,14 +347,13 @@
};
static constexpr uint8_t expected_asm_kMips_adjust_tail[] = {
0x3C, 0x00, 0xBF, 0x8F, 0x38, 0x00, 0xB1, 0x8F, 0x34, 0x00, 0xB0, 0x8F,
- 0x28, 0x00, 0xB6, 0xD7, 0x20, 0x00, 0xB4, 0xD7, 0x40, 0x00, 0xBD, 0x27,
- 0x09, 0x00, 0xE0, 0x03, 0x00, 0x00, 0x00, 0x00,
+ 0x28, 0x00, 0xB6, 0xD7, 0x20, 0x00, 0xB4, 0xD7, 0x09, 0x00, 0xE0, 0x03,
+ 0x40, 0x00, 0xBD, 0x27,
};
static constexpr uint8_t expected_cfi_kMips_adjust[] = {
0x44, 0x0E, 0x40, 0x44, 0x9F, 0x01, 0x44, 0x91, 0x02, 0x44, 0x90, 0x03,
0x54, 0x0E, 0x44, 0x60, 0x0E, 0x40, 0x04, 0x04, 0x00, 0x02, 0x00, 0x0A,
- 0x44, 0xDF, 0x44, 0xD1, 0x44, 0xD0, 0x4C, 0x0E, 0x00, 0x48, 0x0B, 0x0E,
- 0x40,
+ 0x44, 0xDF, 0x44, 0xD1, 0x44, 0xD0, 0x50, 0x0E, 0x00, 0x0B, 0x0E, 0x40,
};
// 0x00000000: addiu r29, r29, -64
// 0x00000004: .cfi_def_cfa_offset: 64
@@ -392,12 +390,11 @@
// 0x00020054: .cfi_restore: r16
// 0x00020054: ldc1 f22, +40(r29)
// 0x00020058: ldc1 f20, +32(r29)
-// 0x0002005c: addiu r29, r29, 64
-// 0x00020060: .cfi_def_cfa_offset: 0
-// 0x00020060: jr r31
-// 0x00020064: nop
-// 0x00020068: .cfi_restore_state
-// 0x00020068: .cfi_def_cfa_offset: 64
+// 0x0002005c: jr r31
+// 0x00020060: addiu r29, r29, 64
+// 0x00020064: .cfi_def_cfa_offset: 0
+// 0x00020064: .cfi_restore_state
+// 0x00020064: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kMips64_adjust_head[] = {
0xD8, 0xFF, 0xBD, 0x67, 0x20, 0x00, 0xBF, 0xFF, 0x18, 0x00, 0xB1, 0xFF,
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 6e98b4d..c5d7611 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -294,7 +294,7 @@
}
uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
}
@@ -311,7 +311,7 @@
bool JitCompile(Thread* self, jit::JitCodeCache* code_cache, ArtMethod* method, bool osr)
OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
void RunOptimizations(HGraph* graph,
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index e96ab19..4289cf7 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -27,7 +27,7 @@
static inline mirror::DexCache* FindDexCacheWithHint(Thread* self,
const DexFile& dex_file,
Handle<mirror::DexCache> hint_dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (LIKELY(hint_dex_cache->GetDexFile() == &dex_file)) {
return hint_dex_cache.Get();
} else {
@@ -85,7 +85,7 @@
void VisitParameterValue(HParameterValue* instr) OVERRIDE;
void UpdateFieldAccessTypeInfo(HInstruction* instr, const FieldInfo& info);
void SetClassAsTypeInfo(HInstruction* instr, mirror::Class* klass, bool is_exact)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void VisitInstanceFieldGet(HInstanceFieldGet* instr) OVERRIDE;
void VisitStaticFieldGet(HStaticFieldGet* instr) OVERRIDE;
void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) OVERRIDE;
@@ -194,7 +194,7 @@
ReferenceTypeInfo upper_bound,
HInstruction* dominator_instr,
HBasicBlock* dominator_block)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// If the position where we should insert the bound type is not already
// a bound type then we need to create one.
if (position == nullptr || !position->IsBoundType()) {
@@ -487,7 +487,7 @@
const DexFile& dex_file,
uint16_t type_idx,
Handle<mirror::DexCache> hint_dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::DexCache* dex_cache = FindDexCacheWithHint(self, dex_file, hint_dex_cache);
// Get type from dex cache assuming it was populated by the verifier.
return dex_cache->GetResolvedType(type_idx);
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index edd83bf..1fa6624 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -44,7 +44,7 @@
// Returns true if klass is admissible to the propagation: non-null and resolved.
// For an array type, we also check if the component type is admissible.
- static bool IsAdmissible(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static bool IsAdmissible(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
return klass != nullptr &&
klass->IsResolved() &&
(!klass->IsArrayClass() || IsAdmissible(klass->GetComponentType()));
@@ -58,7 +58,7 @@
explicit HandleCache(StackHandleScopeCollection* handles) : handles_(handles) { }
template <typename T>
- MutableHandle<T> NewHandle(T* object) SHARED_REQUIRES(Locks::mutator_lock_) {
+ MutableHandle<T> NewHandle(T* object) REQUIRES_SHARED(Locks::mutator_lock_) {
return handles_->NewHandle(object);
}
@@ -80,8 +80,8 @@
void VisitPhi(HPhi* phi);
void VisitBasicBlock(HBasicBlock* block);
- void UpdateBoundType(HBoundType* bound_type) SHARED_REQUIRES(Locks::mutator_lock_);
- void UpdatePhi(HPhi* phi) SHARED_REQUIRES(Locks::mutator_lock_);
+ void UpdateBoundType(HBoundType* bound_type) REQUIRES_SHARED(Locks::mutator_lock_);
+ void UpdatePhi(HPhi* phi) REQUIRES_SHARED(Locks::mutator_lock_);
void BoundTypeForIfNotNull(HBasicBlock* block);
void BoundTypeForIfInstanceOf(HBasicBlock* block);
void ProcessWorklist();
@@ -92,10 +92,10 @@
bool UpdateReferenceTypeInfo(HInstruction* instr);
static void UpdateArrayGet(HArrayGet* instr, HandleCache* handle_cache)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ReferenceTypeInfo MergeTypes(const ReferenceTypeInfo& a, const ReferenceTypeInfo& b)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ValidateTypes();
diff --git a/compiler/optimizing/reference_type_propagation_test.cc b/compiler/optimizing/reference_type_propagation_test.cc
index 7649b50..75a4eac 100644
--- a/compiler/optimizing/reference_type_propagation_test.cc
+++ b/compiler/optimizing/reference_type_propagation_test.cc
@@ -46,7 +46,7 @@
// Relay method to merge type in reference type propagation.
ReferenceTypeInfo MergeTypes(const ReferenceTypeInfo& a,
- const ReferenceTypeInfo& b) SHARED_REQUIRES(Locks::mutator_lock_) {
+ const ReferenceTypeInfo& b) REQUIRES_SHARED(Locks::mutator_lock_) {
return propagation_->MergeTypes(a, b);
}
@@ -56,12 +56,12 @@
}
// Helper method to construct the Object type.
- ReferenceTypeInfo ObjectType(bool is_exact = true) SHARED_REQUIRES(Locks::mutator_lock_) {
+ ReferenceTypeInfo ObjectType(bool is_exact = true) REQUIRES_SHARED(Locks::mutator_lock_) {
return ReferenceTypeInfo::Create(propagation_->handle_cache_.GetObjectClassHandle(), is_exact);
}
// Helper method to construct the String type.
- ReferenceTypeInfo StringType(bool is_exact = true) SHARED_REQUIRES(Locks::mutator_lock_) {
+ ReferenceTypeInfo StringType(bool is_exact = true) REQUIRES_SHARED(Locks::mutator_lock_) {
return ReferenceTypeInfo::Create(propagation_->handle_cache_.GetStringClassHandle(), is_exact);
}
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index 3450286..ad7a8a3 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -28,8 +28,7 @@
codegen_(codegen),
liveness_(liveness) {}
-void RegisterAllocationResolver::Resolve(size_t max_safepoint_live_core_regs,
- size_t max_safepoint_live_fp_regs,
+void RegisterAllocationResolver::Resolve(ArrayRef<HInstruction* const> safepoints,
size_t reserved_out_slots,
size_t int_spill_slots,
size_t long_spill_slots,
@@ -43,10 +42,13 @@
+ double_spill_slots
+ catch_phi_spill_slots;
+ // Update safepoints and calculate the size of the spills.
+ UpdateSafepointLiveRegisters();
+ size_t maximum_safepoint_spill_size = CalculateMaximumSafepointSpillSize(safepoints);
+
// Computes frame size and spill mask.
codegen_->InitializeCodeGeneration(spill_slots,
- max_safepoint_live_core_regs,
- max_safepoint_live_fp_regs,
+ maximum_safepoint_spill_size,
reserved_out_slots, // Includes slot(s) for the art method.
codegen_->GetGraph()->GetLinearOrder());
@@ -135,8 +137,7 @@
// Connect siblings and resolve inputs.
for (size_t i = 0, e = liveness_.GetNumberOfSsaValues(); i < e; ++i) {
HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
- ConnectSiblings(instruction->GetLiveInterval(),
- max_safepoint_live_core_regs + max_safepoint_live_fp_regs);
+ ConnectSiblings(instruction->GetLiveInterval());
}
// Resolve non-linear control flow across branches. Order does not matter.
@@ -222,8 +223,73 @@
}
}
-void RegisterAllocationResolver::ConnectSiblings(LiveInterval* interval,
- size_t max_safepoint_live_regs) {
+void RegisterAllocationResolver::UpdateSafepointLiveRegisters() {
+ for (size_t i = 0, e = liveness_.GetNumberOfSsaValues(); i < e; ++i) {
+ HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
+ for (LiveInterval* current = instruction->GetLiveInterval();
+ current != nullptr;
+ current = current->GetNextSibling()) {
+ if (!current->HasRegister()) {
+ continue;
+ }
+ Location source = current->ToLocation();
+ for (SafepointPosition* safepoint_position = current->GetFirstSafepoint();
+ safepoint_position != nullptr;
+ safepoint_position = safepoint_position->GetNext()) {
+ DCHECK(current->CoversSlow(safepoint_position->GetPosition()));
+ LocationSummary* locations = safepoint_position->GetLocations();
+ switch (source.GetKind()) {
+ case Location::kRegister:
+ case Location::kFpuRegister: {
+ locations->AddLiveRegister(source);
+ break;
+ }
+ case Location::kRegisterPair:
+ case Location::kFpuRegisterPair: {
+ locations->AddLiveRegister(source.ToLow());
+ locations->AddLiveRegister(source.ToHigh());
+ break;
+ }
+ case Location::kStackSlot: // Fall-through
+ case Location::kDoubleStackSlot: // Fall-through
+ case Location::kConstant: {
+ // Nothing to do.
+ break;
+ }
+ default: {
+ LOG(FATAL) << "Unexpected location for object";
+ }
+ }
+ }
+ }
+ }
+}
+
+size_t RegisterAllocationResolver::CalculateMaximumSafepointSpillSize(
+ ArrayRef<HInstruction* const> safepoints) {
+ size_t core_register_spill_size = codegen_->GetWordSize();
+ size_t fp_register_spill_size = codegen_->GetFloatingPointSpillSlotSize();
+ size_t maximum_safepoint_spill_size = 0u;
+ for (HInstruction* instruction : safepoints) {
+ LocationSummary* locations = instruction->GetLocations();
+ if (locations->OnlyCallsOnSlowPath()) {
+ size_t core_spills =
+ codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ true);
+ size_t fp_spills =
+ codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ false);
+ size_t spill_size =
+ core_register_spill_size * core_spills + fp_register_spill_size * fp_spills;
+ maximum_safepoint_spill_size = std::max(maximum_safepoint_spill_size, spill_size);
+ } else if (locations->CallsOnMainAndSlowPath()) {
+ // Nothing to spill on the slow path if the main path already clobbers caller-saves.
+ DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ true));
+ DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ false));
+ }
+ }
+ return maximum_safepoint_spill_size;
+}
+
+void RegisterAllocationResolver::ConnectSiblings(LiveInterval* interval) {
LiveInterval* current = interval;
if (current->HasSpillSlot()
&& current->HasRegister()
@@ -306,48 +372,16 @@
safepoint_position = safepoint_position->GetNext()) {
DCHECK(current->CoversSlow(safepoint_position->GetPosition()));
- LocationSummary* locations = safepoint_position->GetLocations();
- if ((current->GetType() == Primitive::kPrimNot) && current->GetParent()->HasSpillSlot()) {
+ if (current->GetType() == Primitive::kPrimNot) {
DCHECK(interval->GetDefinedBy()->IsActualObject())
<< interval->GetDefinedBy()->DebugName()
<< "@" << safepoint_position->GetInstruction()->DebugName();
- locations->SetStackBit(current->GetParent()->GetSpillSlot() / kVRegSize);
- }
-
- switch (source.GetKind()) {
- case Location::kRegister: {
- locations->AddLiveRegister(source);
- if (kIsDebugBuild && locations->OnlyCallsOnSlowPath()) {
- DCHECK_LE(locations->GetNumberOfLiveRegisters(),
- max_safepoint_live_regs);
- }
- if (current->GetType() == Primitive::kPrimNot) {
- DCHECK(interval->GetDefinedBy()->IsActualObject())
- << interval->GetDefinedBy()->DebugName()
- << "@" << safepoint_position->GetInstruction()->DebugName();
- locations->SetRegisterBit(source.reg());
- }
- break;
+ LocationSummary* locations = safepoint_position->GetLocations();
+ if (current->GetParent()->HasSpillSlot()) {
+ locations->SetStackBit(current->GetParent()->GetSpillSlot() / kVRegSize);
}
- case Location::kFpuRegister: {
- locations->AddLiveRegister(source);
- break;
- }
-
- case Location::kRegisterPair:
- case Location::kFpuRegisterPair: {
- locations->AddLiveRegister(source.ToLow());
- locations->AddLiveRegister(source.ToHigh());
- break;
- }
- case Location::kStackSlot: // Fall-through
- case Location::kDoubleStackSlot: // Fall-through
- case Location::kConstant: {
- // Nothing to do.
- break;
- }
- default: {
- LOG(FATAL) << "Unexpected location for object";
+ if (source.GetKind() == Location::kRegister) {
+ locations->SetRegisterBit(source.reg());
}
}
}
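CalculateMaximumSafepointSpillSize replaces the per-register-file live-register maxima that used to be threaded through Resolve(). A hedged, self-contained version of the same arithmetic; the slot sizes are chosen for illustration, whereas the real values come from the code generator:

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

struct SafepointSpills {
  size_t core_spills;  // caller-save core registers live across the slow path
  size_t fp_spills;    // caller-save FP registers live across the slow path
};

// Maximum spill area needed by any slow-path-only safepoint.
size_t MaxSafepointSpillSize(const std::vector<SafepointSpills>& safepoints,
                             size_t core_spill_size,
                             size_t fp_spill_size) {
  size_t maximum = 0u;
  for (const SafepointSpills& sp : safepoints) {
    size_t spill_size = core_spill_size * sp.core_spills + fp_spill_size * sp.fp_spills;
    maximum = std::max(maximum, spill_size);
  }
  return maximum;
}

int main() {
  // E.g. with 4-byte core slots and 8-byte FP slots, a safepoint with three
  // live core registers and two FP registers needs 4*3 + 8*2 = 28 bytes.
  std::vector<SafepointSpills> safepoints = {{3, 2}, {1, 0}};
  printf("max spill size: %zu bytes\n", MaxSafepointSpillSize(safepoints, 4u, 8u));
  return 0;
}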
diff --git a/compiler/optimizing/register_allocation_resolver.h b/compiler/optimizing/register_allocation_resolver.h
index 6ceb9bc..a70ceae 100644
--- a/compiler/optimizing/register_allocation_resolver.h
+++ b/compiler/optimizing/register_allocation_resolver.h
@@ -20,6 +20,7 @@
#include "base/arena_containers.h"
#include "base/value_object.h"
#include "primitive.h"
+#include "utils/array_ref.h"
namespace art {
@@ -43,8 +44,7 @@
CodeGenerator* codegen,
const SsaLivenessAnalysis& liveness);
- void Resolve(size_t max_safepoint_live_core_regs,
- size_t max_safepoint_live_fp_regs,
+ void Resolve(ArrayRef<HInstruction* const> safepoints,
size_t reserved_out_slots, // Includes slot(s) for the art method.
size_t int_spill_slots,
size_t long_spill_slots,
@@ -54,10 +54,14 @@
const ArenaVector<LiveInterval*>& temp_intervals);
private:
+ // Update the live registers of each safepoint's location summary.
+ void UpdateSafepointLiveRegisters();
+
+ // Calculate the maximum size of the spill area for safepoints.
+ size_t CalculateMaximumSafepointSpillSize(ArrayRef<HInstruction* const> safepoints);
+
// Connect adjacent siblings within blocks, and resolve inputs along the way.
- // Uses max_safepoint_live_regs to check that we did not underestimate the
- // number of live registers at safepoints.
- void ConnectSiblings(LiveInterval* interval, size_t max_safepoint_live_regs);
+ void ConnectSiblings(LiveInterval* interval);
// Connect siblings between block entries and exits.
void ConnectSplitSiblings(LiveInterval* interval, HBasicBlock* from, HBasicBlock* to) const;
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
index a21595f..7178399 100644
--- a/compiler/optimizing/register_allocator_graph_color.cc
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -438,8 +438,7 @@
// track of live intervals across safepoints.
// TODO: Should build safepoints elsewhere.
void BuildInterferenceGraph(const ArenaVector<LiveInterval*>& intervals,
- const ArenaVector<InterferenceNode*>& physical_nodes,
- ArenaVector<InterferenceNode*>* safepoints);
+ const ArenaVector<InterferenceNode*>& physical_nodes);
// Add coalesce opportunities to interference nodes.
void FindCoalesceOpportunities();
@@ -566,11 +565,7 @@
num_long_spill_slots_(0),
catch_phi_spill_slot_counter_(0),
reserved_art_method_slots_(ComputeReservedArtMethodSlots(*codegen)),
- reserved_out_slots_(codegen->GetGraph()->GetMaximumNumberOfOutVRegs()),
- number_of_globally_blocked_core_regs_(0),
- number_of_globally_blocked_fp_regs_(0),
- max_safepoint_live_core_regs_(0),
- max_safepoint_live_fp_regs_(0) {
+ reserved_out_slots_(codegen->GetGraph()->GetMaximumNumberOfOutVRegs()) {
// Before we ask for blocked registers, set them up in the code generator.
codegen->SetupBlockedRegisters();
@@ -584,7 +579,6 @@
physical_core_nodes_[i]->stage = NodeStage::kPrecolored;
core_intervals_.push_back(interval);
if (codegen_->IsBlockedCoreRegister(i)) {
- ++number_of_globally_blocked_core_regs_;
interval->AddRange(0, liveness.GetMaxLifetimePosition());
}
}
@@ -597,7 +591,6 @@
physical_fp_nodes_[i]->stage = NodeStage::kPrecolored;
fp_intervals_.push_back(interval);
if (codegen_->IsBlockedFloatingPointRegister(i)) {
- ++number_of_globally_blocked_fp_regs_;
interval->AddRange(0, liveness.GetMaxLifetimePosition());
}
}
@@ -638,7 +631,7 @@
ArenaVector<InterferenceNode*>& physical_nodes = processing_core_regs
? physical_core_nodes_
: physical_fp_nodes_;
- iteration.BuildInterferenceGraph(intervals, physical_nodes, &safepoints);
+ iteration.BuildInterferenceGraph(intervals, physical_nodes);
// (3) Add coalesce opportunities.
// If we have tried coloring the graph a suspiciously high number of times, give
@@ -667,19 +660,6 @@
// Assign spill slots.
AllocateSpillSlots(iteration.GetPrunableNodes());
- // Compute the maximum number of live registers across safepoints.
- // Notice that we do not count globally blocked registers, such as the stack pointer.
- if (safepoints.size() > 0) {
- size_t max_safepoint_live_regs = ComputeMaxSafepointLiveRegisters(safepoints);
- if (processing_core_regs) {
- max_safepoint_live_core_regs_ =
- max_safepoint_live_regs - number_of_globally_blocked_core_regs_;
- } else {
- max_safepoint_live_fp_regs_=
- max_safepoint_live_regs - number_of_globally_blocked_fp_regs_;
- }
- }
-
// Tell the code generator which registers were allocated.
// We only look at prunable_nodes because we already told the code generator about
// fixed intervals while processing instructions. We also ignore the fixed intervals
@@ -711,8 +691,7 @@
// (6) Resolve locations and deconstruct SSA form.
RegisterAllocationResolver(allocator_, codegen_, liveness_)
- .Resolve(max_safepoint_live_core_regs_,
- max_safepoint_live_fp_regs_,
+ .Resolve(ArrayRef<HInstruction* const>(safepoints_),
reserved_art_method_slots_ + reserved_out_slots_,
num_int_spill_slots_,
num_long_spill_slots_,
@@ -989,24 +968,9 @@
void RegisterAllocatorGraphColor::CheckForSafepoint(HInstruction* instruction) {
LocationSummary* locations = instruction->GetLocations();
- size_t position = instruction->GetLifetimePosition();
if (locations->NeedsSafepoint()) {
safepoints_.push_back(instruction);
- if (locations->OnlyCallsOnSlowPath()) {
- // We add a synthesized range at this position to record the live registers
- // at this position. Ideally, we could just update the safepoints when locations
- // are updated, but we currently need to know the full stack size before updating
- // locations (because of parameters and the fact that we don't have a frame pointer).
- // And knowing the full stack size requires to know the maximum number of live
- // registers at calls in slow paths.
- // By adding the following interval in the algorithm, we can compute this
- // maximum before updating locations.
- LiveInterval* interval = LiveInterval::MakeSlowPathInterval(allocator_, instruction);
- interval->AddRange(position, position + 1);
- core_intervals_.push_back(interval);
- fp_intervals_.push_back(interval);
- }
}
}
@@ -1110,11 +1074,6 @@
bool both_directions) {
if (from->IsPrecolored()) {
// We save space by ignoring outgoing edges from fixed nodes.
- } else if (to->GetInterval()->IsSlowPathSafepoint()) {
- // Safepoint intervals are only there to count max live registers,
- // so no need to give them incoming interference edges.
- // This is also necessary for correctness, because we don't want nodes
- // to remove themselves from safepoint adjacency sets when they're pruned.
} else if (to->IsPrecolored()) {
// It is important that only a single node represents a given fixed register in the
// interference graph. We retrieve that node here.
@@ -1200,8 +1159,7 @@
void ColoringIteration::BuildInterferenceGraph(
const ArenaVector<LiveInterval*>& intervals,
- const ArenaVector<InterferenceNode*>& physical_nodes,
- ArenaVector<InterferenceNode*>* safepoints) {
+ const ArenaVector<InterferenceNode*>& physical_nodes) {
DCHECK(interval_node_map_.Empty() && prunable_nodes_.empty());
// Build the interference graph efficiently by ordering range endpoints
// by position and doing a linear sweep to find interferences. (That is, we
@@ -1236,11 +1194,6 @@
node->SetAlias(physical_node);
DCHECK_EQ(node->GetInterval()->GetRegister(),
physical_node->GetInterval()->GetRegister());
- } else if (sibling->IsSlowPathSafepoint()) {
- // Safepoint intervals are synthesized to count max live registers.
- // They will be processed separately after coloring.
- node->stage = NodeStage::kSafepoint;
- safepoints->push_back(node);
} else {
node->stage = NodeStage::kPrunable;
prunable_nodes_.push_back(node);
@@ -1494,7 +1447,6 @@
// filled by FindCoalesceOpportunities().
for (InterferenceNode* node : prunable_nodes_) {
DCHECK(!node->IsPrecolored()) << "Fixed nodes should never be pruned";
- DCHECK(!node->GetInterval()->IsSlowPathSafepoint()) << "Safepoint nodes should never be pruned";
if (IsLowDegreeNode(node, num_regs_)) {
if (node->GetCoalesceOpportunities().empty()) {
// Simplify Worklist.
@@ -1577,8 +1529,6 @@
pruned_nodes_.push(node);
for (InterferenceNode* adj : node->GetAdjacentNodes()) {
- DCHECK(!adj->GetInterval()->IsSlowPathSafepoint())
- << "Nodes should never interfere with synthesized safepoint nodes";
DCHECK_NE(adj->stage, NodeStage::kPruned) << "Should be no interferences with pruned nodes";
if (adj->IsPrecolored()) {
@@ -1938,18 +1888,6 @@
return successful;
}
-size_t RegisterAllocatorGraphColor::ComputeMaxSafepointLiveRegisters(
- const ArenaVector<InterferenceNode*>& safepoints) {
- size_t max_safepoint_live_regs = 0;
- for (InterferenceNode* safepoint : safepoints) {
- DCHECK(safepoint->GetInterval()->IsSlowPathSafepoint());
- std::bitset<kMaxNumRegs> conflict_mask = BuildConflictMask(safepoint->GetAdjacentNodes());
- size_t live_regs = conflict_mask.count();
- max_safepoint_live_regs = std::max(max_safepoint_live_regs, live_regs);
- }
- return max_safepoint_live_regs;
-}
-
void RegisterAllocatorGraphColor::AllocateSpillSlots(const ArenaVector<InterferenceNode*>& nodes) {
// The register allocation resolver will organize the stack based on value type,
// so we assign stack slots for each value type separately.
diff --git a/compiler/optimizing/register_allocator_graph_color.h b/compiler/optimizing/register_allocator_graph_color.h
index ed12561..548687f 100644
--- a/compiler/optimizing/register_allocator_graph_color.h
+++ b/compiler/optimizing/register_allocator_graph_color.h
@@ -140,10 +140,6 @@
bool IsCallerSave(size_t reg, bool processing_core_regs);
- // Return the maximum number of registers live at safepoints,
- // based on the outgoing interference edges of safepoint nodes.
- size_t ComputeMaxSafepointLiveRegisters(const ArenaVector<InterferenceNode*>& safepoints);
-
// Assigns stack slots to a list of intervals, ensuring that interfering intervals are not
// assigned the same stack slot.
void ColorSpillSlots(ArenaVector<LiveInterval*>* nodes,
@@ -187,14 +183,6 @@
// Number of stack slots needed for outgoing arguments.
const size_t reserved_out_slots_;
- // The number of globally blocked core and floating point registers, such as the stack pointer.
- size_t number_of_globally_blocked_core_regs_;
- size_t number_of_globally_blocked_fp_regs_;
-
- // The maximum number of registers live at safe points. Needed by the code generator.
- size_t max_safepoint_live_core_regs_;
- size_t max_safepoint_live_fp_regs_;
-
friend class ColoringIteration;
DISALLOW_COPY_AND_ASSIGN(RegisterAllocatorGraphColor);
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index 768ed2d..6910c71 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -63,9 +63,7 @@
registers_array_(nullptr),
blocked_core_registers_(codegen->GetBlockedCoreRegisters()),
blocked_fp_registers_(codegen->GetBlockedFloatingPointRegisters()),
- reserved_out_slots_(0),
- maximum_number_of_live_core_registers_(0),
- maximum_number_of_live_fp_registers_(0) {
+ reserved_out_slots_(0) {
temp_intervals_.reserve(4);
int_spill_slots_.reserve(kDefaultNumberOfSpillSlots);
long_spill_slots_.reserve(kDefaultNumberOfSpillSlots);
@@ -92,8 +90,7 @@
void RegisterAllocatorLinearScan::AllocateRegisters() {
AllocateRegistersInternal();
RegisterAllocationResolver(allocator_, codegen_, liveness_)
- .Resolve(maximum_number_of_live_core_registers_,
- maximum_number_of_live_fp_registers_,
+ .Resolve(ArrayRef<HInstruction* const>(safepoints_),
reserved_out_slots_,
int_spill_slots_.size(),
long_spill_slots_.size(),
@@ -283,20 +280,6 @@
return;
}
safepoints_.push_back(instruction);
- if (locations->OnlyCallsOnSlowPath()) {
- // We add a synthesized range at this position to record the live registers
- // at this position. Ideally, we could just update the safepoints when locations
- // are updated, but we currently need to know the full stack size before updating
- // locations (because of parameters and the fact that we don't have a frame pointer).
- // And knowing the full stack size requires to know the maximum number of live
- // registers at calls in slow paths.
- // By adding the following interval in the algorithm, we can compute this
- // maximum before updating locations.
- LiveInterval* interval = LiveInterval::MakeSlowPathInterval(allocator_, instruction);
- interval->AddRange(position, position + 1);
- AddSorted(&unhandled_core_intervals_, interval);
- AddSorted(&unhandled_fp_intervals_, interval);
- }
}
if (locations->WillCall()) {
@@ -569,20 +552,6 @@
});
inactive_.erase(inactive_kept_end, inactive_to_handle_end);
- if (current->IsSlowPathSafepoint()) {
- // Synthesized interval to record the maximum number of live registers
- // at safepoints. No need to allocate a register for it.
- if (processing_core_registers_) {
- maximum_number_of_live_core_registers_ =
- std::max(maximum_number_of_live_core_registers_, active_.size());
- } else {
- maximum_number_of_live_fp_registers_ =
- std::max(maximum_number_of_live_fp_registers_, active_.size());
- }
- DCHECK(unhandled_->empty() || unhandled_->back()->GetStart() > current->GetStart());
- continue;
- }
-
if (current->IsHighInterval() && !current->GetLowInterval()->HasRegister()) {
DCHECK(!current->HasRegister());
// Allocating the low part was unsuccessful. The split interval for the high part
@@ -685,7 +654,7 @@
// the next intersection with `current`.
for (LiveInterval* inactive : inactive_) {
// Temp/Slow-path-safepoint interval has no holes.
- DCHECK(!inactive->IsTemp() && !inactive->IsSlowPathSafepoint());
+ DCHECK(!inactive->IsTemp());
if (!current->IsSplit() && !inactive->IsFixed()) {
// Neither current nor inactive are fixed.
// Thanks to SSA, a non-split interval starting in a hole of an
@@ -933,7 +902,7 @@
// start of current.
for (LiveInterval* inactive : inactive_) {
// Temp/Slow-path-safepoint interval has no holes.
- DCHECK(!inactive->IsTemp() && !inactive->IsSlowPathSafepoint());
+ DCHECK(!inactive->IsTemp());
if (!current->IsSplit() && !inactive->IsFixed()) {
// Neither current nor inactive are fixed.
// Thanks to SSA, a non-split interval starting in a hole of an
@@ -1085,12 +1054,6 @@
if (current->StartsAfter(interval) && !current->IsHighInterval()) {
insert_at = i;
break;
- } else if ((current->GetStart() == interval->GetStart()) && current->IsSlowPathSafepoint()) {
- // Ensure the slow path interval is the last to be processed at its location: we want the
- // interval to know all live registers at this location.
- DCHECK(i == 1 || (*array)[i - 2u]->StartsAfter(current));
- insert_at = i;
- break;
}
}
diff --git a/compiler/optimizing/register_allocator_linear_scan.h b/compiler/optimizing/register_allocator_linear_scan.h
index 1a643a0..b3834f4 100644
--- a/compiler/optimizing/register_allocator_linear_scan.h
+++ b/compiler/optimizing/register_allocator_linear_scan.h
@@ -171,12 +171,6 @@
// Slots reserved for out arguments.
size_t reserved_out_slots_;
- // The maximum live core registers at safepoints.
- size_t maximum_number_of_live_core_registers_;
-
- // The maximum live FP registers at safepoints.
- size_t maximum_number_of_live_fp_registers_;
-
ART_FRIEND_TEST(RegisterAllocatorTest, FreeUntil);
ART_FRIEND_TEST(RegisterAllocatorTest, SpillInactive);
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 40fff8a..b8e1379 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -20,6 +20,7 @@
#include "base/enums.h"
#include "class_linker.h"
#include "code_generator.h"
+#include "driver/compiler_options.h"
#include "driver/dex_compilation_unit.h"
#include "utils/dex_cache_arrays_layout-inl.h"
#include "driver/compiler_driver.h"
@@ -175,6 +176,7 @@
uint32_t type_index = load_class->GetTypeIndex();
bool is_in_dex_cache = false;
+ bool is_in_boot_image = false;
HLoadClass::LoadKind desired_load_kind;
uint64_t address = 0u; // Class or dex cache element address.
{
@@ -191,45 +193,42 @@
// Compiling boot image. Check if the class is a boot image class.
DCHECK(!runtime->UseJitCompilation());
if (!compiler_driver_->GetSupportBootImageFixup()) {
- // MIPS/MIPS64 or compiler_driver_test. Do not sharpen.
+ // MIPS64 or compiler_driver_test. Do not sharpen.
desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
+ } else if ((klass != nullptr) && compiler_driver_->IsImageClass(
+ dex_file.StringDataByIdx(dex_file.GetTypeId(type_index).descriptor_idx_))) {
+ is_in_boot_image = true;
+ is_in_dex_cache = true;
+ desired_load_kind = codegen_->GetCompilerOptions().GetCompilePic()
+ ? HLoadClass::LoadKind::kBootImageLinkTimePcRelative
+ : HLoadClass::LoadKind::kBootImageLinkTimeAddress;
} else {
- if (klass != nullptr &&
- compiler_driver_->IsImageClass(
- dex_file.StringDataByIdx(dex_file.GetTypeId(type_index).descriptor_idx_))) {
- is_in_dex_cache = true;
- desired_load_kind = codegen_->GetCompilerOptions().GetCompilePic()
- ? HLoadClass::LoadKind::kBootImageLinkTimePcRelative
- : HLoadClass::LoadKind::kBootImageLinkTimeAddress;
- } else {
- // Not a boot image class. We must go through the dex cache.
- DCHECK(ContainsElement(compiler_driver_->GetDexFilesForOatFile(), &dex_file));
- desired_load_kind = HLoadClass::LoadKind::kDexCachePcRelative;
- }
- }
- } else if (runtime->UseJitCompilation()) {
- // TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
- // DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
- is_in_dex_cache = (klass != nullptr);
- if (klass != nullptr && runtime->GetHeap()->ObjectIsInBootImageSpace(klass)) {
- // TODO: Use direct pointers for all non-moving spaces, not just boot image. Bug: 29530787
- desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
- address = reinterpret_cast64<uint64_t>(klass);
- } else {
- // Note: If the class is not in the dex cache or isn't initialized, the
- // instruction needs environment and will not be inlined across dex files.
- // Within a dex file, the slow-path helper loads the correct class and
- // inlined frames are used correctly for OOM stack trace.
- // TODO: Write a test for this. Bug: 29416588
- desired_load_kind = HLoadClass::LoadKind::kDexCacheAddress;
- void* dex_cache_element_address = &dex_cache->GetResolvedTypes()[type_index];
- address = reinterpret_cast64<uint64_t>(dex_cache_element_address);
+ // Not a boot image class. We must go through the dex cache.
+ DCHECK(ContainsElement(compiler_driver_->GetDexFilesForOatFile(), &dex_file));
+ desired_load_kind = HLoadClass::LoadKind::kDexCachePcRelative;
}
} else {
- // AOT app compilation. Check if the class is in the boot image.
- if ((klass != nullptr) &&
- runtime->GetHeap()->ObjectIsInBootImageSpace(klass) &&
- !codegen_->GetCompilerOptions().GetCompilePic()) {
+ is_in_boot_image = (klass != nullptr) && runtime->GetHeap()->ObjectIsInBootImageSpace(klass);
+ if (runtime->UseJitCompilation()) {
+ // TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
+ // DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
+ is_in_dex_cache = (klass != nullptr);
+ if (is_in_boot_image) {
+ // TODO: Use direct pointers for all non-moving spaces, not just boot image. Bug: 29530787
+ desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
+ address = reinterpret_cast64<uint64_t>(klass);
+ } else {
+ // Note: If the class is not in the dex cache or isn't initialized, the
+ // instruction needs environment and will not be inlined across dex files.
+ // Within a dex file, the slow-path helper loads the correct class and
+ // inlined frames are used correctly for OOM stack trace.
+ // TODO: Write a test for this. Bug: 29416588
+ desired_load_kind = HLoadClass::LoadKind::kDexCacheAddress;
+ void* dex_cache_element_address = &dex_cache->GetResolvedTypes()[type_index];
+ address = reinterpret_cast64<uint64_t>(dex_cache_element_address);
+ }
+ // AOT app compilation. Check if the class is in the boot image.
+ } else if (is_in_boot_image && !codegen_->GetCompilerOptions().GetCompilePic()) {
desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
address = reinterpret_cast64<uint64_t>(klass);
} else {
@@ -246,6 +245,9 @@
if (is_in_dex_cache) {
load_class->MarkInDexCache();
}
+ if (is_in_boot_image) {
+ load_class->MarkInBootImage();
+ }
HLoadClass::LoadKind load_kind = codegen_->GetSupportedLoadClassKind(desired_load_kind);
switch (load_kind) {
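For readers following the sharpening hunk above, here is a condensed, self-contained sketch of the revised HLoadClass load-kind decision. The enum values and the three compilation modes mirror the diff; the plain booleans and the ClassInfo struct are hypothetical stand-ins for the real CompilerDriver/Runtime queries, and the final AOT fallback is assumed, so treat this as an illustration rather than ART's actual API:

enum class LoadKind {
  kBootImageLinkTimePcRelative,
  kBootImageLinkTimeAddress,
  kBootImageAddress,
  kDexCachePcRelative,
  kDexCacheAddress,
  kDexCacheViaMethod,
};

// Hypothetical stand-ins for the queries made in HSharpening (not ART's API).
struct ClassInfo {
  bool resolved;             // klass != nullptr
  bool is_image_class;       // compiler_driver_->IsImageClass(descriptor)
  bool in_boot_image_space;  // heap->ObjectIsInBootImageSpace(klass)
};

LoadKind ChooseLoadKind(bool compiling_boot_image,
                        bool use_jit,
                        bool compile_pic,
                        bool support_boot_image_fixup,
                        const ClassInfo& klass) {
  if (compiling_boot_image) {
    if (!support_boot_image_fixup) {
      return LoadKind::kDexCacheViaMethod;  // e.g. MIPS64: do not sharpen.
    }
    if (klass.resolved && klass.is_image_class) {
      // Both is_in_boot_image and is_in_dex_cache are set in this case.
      return compile_pic ? LoadKind::kBootImageLinkTimePcRelative
                         : LoadKind::kBootImageLinkTimeAddress;
    }
    return LoadKind::kDexCachePcRelative;  // Not a boot image class.
  }
  if (use_jit) {
    return klass.in_boot_image_space ? LoadKind::kBootImageAddress
                                     : LoadKind::kDexCacheAddress;
  }
  // AOT app compilation.
  if (klass.in_boot_image_space && !compile_pic) {
    return LoadKind::kBootImageAddress;
  }
  return LoadKind::kDexCachePcRelative;  // Assumed fallback; the else branch is outside the hunk.
}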
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 5a574d9..f7dc112 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -303,7 +303,7 @@
}
static Primitive::Type GetPrimitiveArrayComponentType(HInstruction* array)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ReferenceTypeInfo array_type = array->GetReferenceTypeInfo();
DCHECK(array_type.IsPrimitiveArrayClass());
return array_type.GetTypeHandle()->GetComponentType()->GetPrimitiveType();
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index a01e107..a4d52d7 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -59,6 +59,38 @@
worklist->insert(insert_pos.base(), block);
}
+static bool IsLinearOrderWellFormed(const HGraph& graph) {
+ for (HBasicBlock* header : graph.GetBlocks()) {
+ if (header == nullptr || !header->IsLoopHeader()) {
+ continue;
+ }
+
+ HLoopInformation* loop = header->GetLoopInformation();
+ size_t num_blocks = loop->GetBlocks().NumSetBits();
+ size_t found_blocks = 0u;
+
+ for (HLinearOrderIterator it(graph); !it.Done(); it.Advance()) {
+ HBasicBlock* current = it.Current();
+ if (loop->Contains(*current)) {
+ found_blocks++;
+ if (found_blocks == 1u && current != header) {
+ // First block is not the header.
+ return false;
+ } else if (found_blocks == num_blocks && !loop->IsBackEdge(*current)) {
+ // Last block is not a back edge.
+ return false;
+ }
+ } else if (found_blocks != 0u && found_blocks != num_blocks) {
+ // Blocks are not adjacent.
+ return false;
+ }
+ }
+ DCHECK_EQ(found_blocks, num_blocks);
+ }
+
+ return true;
+}
+
void SsaLivenessAnalysis::LinearizeGraph() {
// Create a reverse post ordering with the following properties:
// - Blocks in a loop are consecutive,
@@ -100,6 +132,8 @@
forward_predecessors[block_id] = number_of_remaining_predecessors - 1;
}
} while (!worklist.empty());
+
+ DCHECK(graph_->HasIrreducibleLoops() || IsLinearOrderWellFormed(*graph_));
}
void SsaLivenessAnalysis::NumberInstructions() {
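The new IsLinearOrderWellFormed() above encodes the invariant that LinearizeGraph() is supposed to produce: every loop's blocks are consecutive in the linear order, starting at the header and ending at a back edge. A minimal, self-contained sketch of the same check over plain data (a hypothetical Loop struct, no HGraph plumbing):

#include <cstddef>
#include <unordered_set>
#include <vector>

// Simplified stand-in: a loop is a set of block ids plus its header and back edges.
struct Loop {
  int header;
  std::unordered_set<int> blocks;
  std::unordered_set<int> back_edges;
};

// Returns true if every loop's blocks appear contiguously in `linear_order`,
// starting at the header and ending at a back edge.
bool LinearOrderIsWellFormed(const std::vector<int>& linear_order,
                             const std::vector<Loop>& loops) {
  for (const Loop& loop : loops) {
    size_t found = 0;
    for (int block : linear_order) {
      if (loop.blocks.count(block) != 0) {
        ++found;
        if (found == 1 && block != loop.header) return false;   // First block is not the header.
        if (found == loop.blocks.size() &&
            loop.back_edges.count(block) == 0) return false;    // Last block is not a back edge.
      } else if (found != 0 && found != loop.blocks.size()) {
        return false;                                            // Blocks are not adjacent.
      }
    }
  }
  return true;
}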
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 92788fe..0be1611 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -208,11 +208,6 @@
return new (allocator) LiveInterval(allocator, type, instruction);
}
- static LiveInterval* MakeSlowPathInterval(ArenaAllocator* allocator, HInstruction* instruction) {
- return new (allocator) LiveInterval(
- allocator, Primitive::kPrimVoid, instruction, false, kNoRegister, false, true);
- }
-
static LiveInterval* MakeFixedInterval(ArenaAllocator* allocator, int reg, Primitive::Type type) {
return new (allocator) LiveInterval(allocator, type, nullptr, true, reg, false);
}
@@ -223,7 +218,6 @@
bool IsFixed() const { return is_fixed_; }
bool IsTemp() const { return is_temp_; }
- bool IsSlowPathSafepoint() const { return is_slow_path_safepoint_; }
// This interval is the result of a split.
bool IsSplit() const { return parent_ != this; }
@@ -790,7 +784,7 @@
DCHECK(!HasHighInterval());
DCHECK(!HasLowInterval());
high_or_low_interval_ = new (allocator_) LiveInterval(
- allocator_, type_, defined_by_, false, kNoRegister, is_temp, false, true);
+ allocator_, type_, defined_by_, false, kNoRegister, is_temp, true);
high_or_low_interval_->high_or_low_interval_ = this;
if (first_range_ != nullptr) {
high_or_low_interval_->first_range_ = first_range_->Dup(allocator_);
@@ -919,7 +913,6 @@
bool is_fixed = false,
int reg = kNoRegister,
bool is_temp = false,
- bool is_slow_path_safepoint = false,
bool is_high_interval = false)
: allocator_(allocator),
first_range_(nullptr),
@@ -936,7 +929,6 @@
spill_slot_(kNoSpillSlot),
is_fixed_(is_fixed),
is_temp_(is_temp),
- is_slow_path_safepoint_(is_slow_path_safepoint),
is_high_interval_(is_high_interval),
high_or_low_interval_(nullptr),
defined_by_(defined_by) {}
@@ -983,38 +975,6 @@
return false;
}
- bool IsLinearOrderWellFormed(const HGraph& graph) {
- for (HBasicBlock* header : graph.GetBlocks()) {
- if (header == nullptr || !header->IsLoopHeader()) {
- continue;
- }
-
- HLoopInformation* loop = header->GetLoopInformation();
- size_t num_blocks = loop->GetBlocks().NumSetBits();
- size_t found_blocks = 0u;
-
- for (HLinearOrderIterator it(graph); !it.Done(); it.Advance()) {
- HBasicBlock* current = it.Current();
- if (loop->Contains(*current)) {
- found_blocks++;
- if (found_blocks == 1u && current != header) {
- // First block is not the header.
- return false;
- } else if (found_blocks == num_blocks && !loop->IsBackEdge(*current)) {
- // Last block is not a back edge.
- return false;
- }
- } else if (found_blocks != 0u && found_blocks != num_blocks) {
- // Blocks are not adjacent.
- return false;
- }
- }
- DCHECK_EQ(found_blocks, num_blocks);
- }
-
- return true;
- }
-
void AddBackEdgeUses(const HBasicBlock& block_at_use) {
DCHECK(block_at_use.IsInLoop());
if (block_at_use.GetGraph()->HasIrreducibleLoops()) {
@@ -1024,8 +984,6 @@
return;
}
- DCHECK(IsLinearOrderWellFormed(*block_at_use.GetGraph()));
-
// Add synthesized uses at the back edge of loops to help the register allocator.
// Note that this method is called in decreasing liveness order, to facilitate adding
// uses at the head of the `first_use_` linked list. Because below
@@ -1121,9 +1079,6 @@
// Whether the interval is for a temporary.
const bool is_temp_;
- // Whether the interval is for a safepoint that calls on slow path.
- const bool is_slow_path_safepoint_;
-
// Whether this interval is a synthesized interval for register pair.
const bool is_high_interval_;
diff --git a/compiler/optimizing/x86_memory_gen.cc b/compiler/optimizing/x86_memory_gen.cc
index 8aa315a..4e25683 100644
--- a/compiler/optimizing/x86_memory_gen.cc
+++ b/compiler/optimizing/x86_memory_gen.cc
@@ -16,6 +16,7 @@
#include "x86_memory_gen.h"
#include "code_generator.h"
+#include "driver/compiler_options.h"
namespace art {
namespace x86 {
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index 55835e7..70f290d 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -152,7 +152,7 @@
__ LoadFromOffset(kLoadWord, T9, S1, offset.Int32Value());
}
__ Jr(T9);
- __ Nop();
+ __ NopIfNoReordering();
__ Break();
__ FinalizeCode();
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index 719fe7f..a03dd74 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -73,12 +73,11 @@
cfi().RelOffsetForMany(DWARFReg(r0), 0, core_spill_mask, kFramePointerSize);
if (fp_spill_mask != 0) {
uint32_t first = CTZ(fp_spill_mask);
- uint32_t last = first + POPCOUNT(fp_spill_mask) - 1;
// Check that list is contiguous.
DCHECK_EQ(fp_spill_mask >> CTZ(fp_spill_mask), ~0u >> (32 - POPCOUNT(fp_spill_mask)));
- ___ Vpush(SRegisterList(vixl32::SRegister(first), vixl32::SRegister(last)));
+ ___ Vpush(SRegisterList(vixl32::SRegister(first), POPCOUNT(fp_spill_mask)));
cfi().AdjustCFAOffset(POPCOUNT(fp_spill_mask) * kFramePointerSize);
cfi().RelOffsetForMany(DWARFReg(s0), 0, fp_spill_mask, kFramePointerSize);
}
@@ -136,11 +135,10 @@
if (fp_spill_mask != 0) {
uint32_t first = CTZ(fp_spill_mask);
- uint32_t last = first + POPCOUNT(fp_spill_mask) - 1;
// Check that list is contiguous.
DCHECK_EQ(fp_spill_mask >> CTZ(fp_spill_mask), ~0u >> (32 - POPCOUNT(fp_spill_mask)));
- ___ Vpop(SRegisterList(vixl32::SRegister(first), vixl32::SRegister(last)));
+ ___ Vpop(SRegisterList(vixl32::SRegister(first), POPCOUNT(fp_spill_mask)));
cfi().AdjustCFAOffset(-kFramePointerSize * POPCOUNT(fp_spill_mask));
cfi().RestoreMany(DWARFReg(s0), fp_spill_mask);
}
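The switch from SRegisterList(first, last) to SRegisterList(first, count) relies on the FP spill mask being one contiguous run of bits, which is exactly what the DCHECK verifies. A small stand-alone version of that contiguity check (GCC/Clang builtins used for brevity; not ART code):

#include <cassert>
#include <cstdint>

// Returns true if the set bits of `mask` form one contiguous run, the same
// property the DCHECK above checks before building the SRegisterList.
bool IsContiguousMask(uint32_t mask) {
  if (mask == 0u) return false;
  int first = __builtin_ctz(mask);       // Index of the lowest spilled register.
  int count = __builtin_popcount(mask);  // Number of spilled registers.
  return (mask >> first) == (~0u >> (32 - count));
}

int main() {
  assert(IsContiguousMask(0x00F0u));   // s4..s7: contiguous.
  assert(!IsContiguousMask(0x0050u));  // s4 and s6: gap, not contiguous.
  return 0;
}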
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 8981776..b616057 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -362,6 +362,16 @@
// Size of generated code
virtual size_t CodeSize() const { return buffer_.Size(); }
virtual const uint8_t* CodeBufferBaseAddress() const { return buffer_.contents(); }
+ // CodePosition() is a non-const method similar to CodeSize(); it is used to
+ // record positions within the code buffer for the purpose of signal handling
+ // (stack overflow checks and implicit null checks may trigger signals and the
+ // signal handlers expect them right before the recorded positions).
+ // On most architectures CodePosition() should be equivalent to CodeSize(), but
+ // the MIPS assembler needs to be aware of this recording, so it doesn't put
+ // the instructions that can trigger signals into branch delay slots. Handling
+ // signals from instructions in delay slots is a bit problematic and should be
+ // avoided.
+ virtual size_t CodePosition() { return CodeSize(); }
// Copy instructions out of assembly buffer into the given region of memory
virtual void FinalizeInstructions(const MemoryRegion& region) {
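A minimal model of the CodePosition() contract documented above, assuming only what the comment states: the default implementation returns CodeSize(), and a reordering assembler must stop treating the last emitted instruction as a delay-slot candidate once its position has been recorded. Class and member names here are invented for illustration:

#include <cstddef>
#include <cstdint>
#include <vector>

class AssemblerModel {
 public:
  virtual ~AssemblerModel() {}
  size_t CodeSize() const { return buffer_.size() * sizeof(uint32_t); }
  virtual size_t CodePosition() { return CodeSize(); }
  void Emit(uint32_t insn) { buffer_.push_back(insn); }
 protected:
  std::vector<uint32_t> buffer_;
};

class ReorderingAssemblerModel : public AssemblerModel {
 public:
  void EmitReorderable(uint32_t insn) {
    Emit(insn);
    delay_slot_candidate_ = insn;  // May later be moved into a branch delay slot.
  }
  size_t CodePosition() override {
    // The instruction right before the recorded position may trigger a signal,
    // so it must stay put: drop it as a delay-slot candidate.
    delay_slot_candidate_ = 0u;
    return CodeSize();
  }
 private:
  uint32_t delay_slot_candidate_ = 0u;
};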
diff --git a/compiler/utils/intrusive_forward_list_test.cc b/compiler/utils/intrusive_forward_list_test.cc
index 517142e..f2efa4d 100644
--- a/compiler/utils/intrusive_forward_list_test.cc
+++ b/compiler/utils/intrusive_forward_list_test.cc
@@ -39,12 +39,12 @@
return lhs.value < rhs.value;
}
-#define ASSERT_LISTS_EQUAL(expected, value) \
- do { \
- ASSERT_EQ(expected.empty(), value.empty()); \
- ASSERT_EQ(std::distance(expected.begin(), expected.end()), \
- std::distance(value.begin(), value.end())); \
- ASSERT_TRUE(std::equal(expected.begin(), expected.end(), value.begin())); \
+#define ASSERT_LISTS_EQUAL(expected, value) \
+ do { \
+ ASSERT_EQ((expected).empty(), (value).empty()); \
+ ASSERT_EQ(std::distance((expected).begin(), (expected).end()), \
+ std::distance((value).begin(), (value).end())); \
+ ASSERT_TRUE(std::equal((expected).begin(), (expected).end(), (value).begin())); \
} while (false)
TEST(IntrusiveForwardList, IteratorToConstIterator) {
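The added parentheses around the macro parameters are the usual macro-hygiene fix: without them, an argument that is itself an expression can bind incorrectly after expansion. A tiny stand-alone illustration (not from the ART sources):

#include <cassert>
#include <vector>

#define COUNT_BAD(c)  c.size()    // COUNT_BAD(*ptr) expands to *ptr.size(), i.e. *(ptr.size()).
#define COUNT_GOOD(c) (c).size()  // COUNT_GOOD(*ptr) expands to (*ptr).size(), as intended.

int main() {
  std::vector<int> v{1, 2, 3};
  std::vector<int>* ptr = &v;
  // COUNT_BAD(*ptr);             // Does not compile: '.' binds tighter than unary '*'.
  assert(COUNT_GOOD(*ptr) == 3u);
  return 0;
}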
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index bfc63d1..4b580b6 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -40,10 +40,195 @@
return os;
}
+MipsAssembler::DelaySlot::DelaySlot()
+ : instruction_(0),
+ gpr_outs_mask_(0),
+ gpr_ins_mask_(0),
+ fpr_outs_mask_(0),
+ fpr_ins_mask_(0),
+ cc_outs_mask_(0),
+ cc_ins_mask_(0) {}
+
+void MipsAssembler::DsFsmInstr(uint32_t instruction,
+ uint32_t gpr_outs_mask,
+ uint32_t gpr_ins_mask,
+ uint32_t fpr_outs_mask,
+ uint32_t fpr_ins_mask,
+ uint32_t cc_outs_mask,
+ uint32_t cc_ins_mask) {
+ if (!reordering_) {
+ CHECK_EQ(ds_fsm_state_, kExpectingLabel);
+ CHECK_EQ(delay_slot_.instruction_, 0u);
+ return;
+ }
+ switch (ds_fsm_state_) {
+ case kExpectingLabel:
+ break;
+ case kExpectingInstruction:
+ CHECK_EQ(ds_fsm_target_pc_ + sizeof(uint32_t), buffer_.Size());
+ // If the last instruction is not suitable for delay slots, drop
+ // the PC of the label preceding it so that no unconditional branch
+ // uses this instruction to fill its delay slot.
+ if (instruction == 0) {
+ DsFsmDropLabel(); // Sets ds_fsm_state_ = kExpectingLabel.
+ } else {
+ // Otherwise wait for another instruction or label before we can
+ // commit the label PC. The label PC will be dropped if instead
+ // of another instruction or label there's a call from the code
+ // generator to CodePosition() to record the buffer size.
+ // Instructions after which the buffer size is recorded cannot
+ // be moved into delay slots or anywhere else because they may
+ // trigger signals and the signal handlers expect these signals
+ // to be coming from the instructions immediately preceding the
+ // recorded buffer locations.
+ ds_fsm_state_ = kExpectingCommit;
+ }
+ break;
+ case kExpectingCommit:
+ CHECK_EQ(ds_fsm_target_pc_ + 2 * sizeof(uint32_t), buffer_.Size());
+ DsFsmCommitLabel(); // Sets ds_fsm_state_ = kExpectingLabel.
+ break;
+ }
+ delay_slot_.instruction_ = instruction;
+ delay_slot_.gpr_outs_mask_ = gpr_outs_mask & ~1u; // Ignore register ZERO.
+ delay_slot_.gpr_ins_mask_ = gpr_ins_mask & ~1u; // Ignore register ZERO.
+ delay_slot_.fpr_outs_mask_ = fpr_outs_mask;
+ delay_slot_.fpr_ins_mask_ = fpr_ins_mask;
+ delay_slot_.cc_outs_mask_ = cc_outs_mask;
+ delay_slot_.cc_ins_mask_ = cc_ins_mask;
+}
+
+void MipsAssembler::DsFsmLabel() {
+ if (!reordering_) {
+ CHECK_EQ(ds_fsm_state_, kExpectingLabel);
+ CHECK_EQ(delay_slot_.instruction_, 0u);
+ return;
+ }
+ switch (ds_fsm_state_) {
+ case kExpectingLabel:
+ ds_fsm_target_pc_ = buffer_.Size();
+ ds_fsm_state_ = kExpectingInstruction;
+ break;
+ case kExpectingInstruction:
+ // Allow consecutive labels.
+ CHECK_EQ(ds_fsm_target_pc_, buffer_.Size());
+ break;
+ case kExpectingCommit:
+ CHECK_EQ(ds_fsm_target_pc_ + sizeof(uint32_t), buffer_.Size());
+ DsFsmCommitLabel();
+ ds_fsm_target_pc_ = buffer_.Size();
+ ds_fsm_state_ = kExpectingInstruction;
+ break;
+ }
+ // We cannot move instructions into delay slots across labels.
+ delay_slot_.instruction_ = 0;
+}
+
+void MipsAssembler::DsFsmCommitLabel() {
+ if (ds_fsm_state_ == kExpectingCommit) {
+ ds_fsm_target_pcs_.emplace_back(ds_fsm_target_pc_);
+ }
+ ds_fsm_state_ = kExpectingLabel;
+}
+
+void MipsAssembler::DsFsmDropLabel() {
+ ds_fsm_state_ = kExpectingLabel;
+}
+
+bool MipsAssembler::SetReorder(bool enable) {
+ bool last_state = reordering_;
+ if (last_state != enable) {
+ DsFsmCommitLabel();
+ DsFsmInstrNop(0);
+ }
+ reordering_ = enable;
+ return last_state;
+}
+
+size_t MipsAssembler::CodePosition() {
+ // The last instruction cannot be used in a delay slot; do not commit
+ // the label before it (if any) and clear the delay slot.
+ DsFsmDropLabel();
+ DsFsmInstrNop(0);
+ size_t size = buffer_.Size();
+ // In theory we can get the following sequence:
+ // label1:
+ // instr
+ // label2: # label1 gets committed when label2 is seen
+ // CodePosition() call
+ // and we need to uncommit label1.
+ if (ds_fsm_target_pcs_.size() != 0 && ds_fsm_target_pcs_.back() + sizeof(uint32_t) == size) {
+ ds_fsm_target_pcs_.pop_back();
+ }
+ return size;
+}
+
+void MipsAssembler::DsFsmInstrNop(uint32_t instruction ATTRIBUTE_UNUSED) {
+ DsFsmInstr(0, 0, 0, 0, 0, 0, 0);
+}
+
+void MipsAssembler::DsFsmInstrRrr(uint32_t instruction, Register out, Register in1, Register in2) {
+ DsFsmInstr(instruction, (1u << out), (1u << in1) | (1u << in2), 0, 0, 0, 0);
+}
+
+void MipsAssembler::DsFsmInstrRrrr(uint32_t instruction,
+ Register in1_out,
+ Register in2,
+ Register in3) {
+ DsFsmInstr(instruction, (1u << in1_out), (1u << in1_out) | (1u << in2) | (1u << in3), 0, 0, 0, 0);
+}
+
+void MipsAssembler::DsFsmInstrFff(uint32_t instruction,
+ FRegister out,
+ FRegister in1,
+ FRegister in2) {
+ DsFsmInstr(instruction, 0, 0, (1u << out), (1u << in1) | (1u << in2), 0, 0);
+}
+
+void MipsAssembler::DsFsmInstrFfff(uint32_t instruction,
+ FRegister in1_out,
+ FRegister in2,
+ FRegister in3) {
+ DsFsmInstr(instruction, 0, 0, (1u << in1_out), (1u << in1_out) | (1u << in2) | (1u << in3), 0, 0);
+}
+
+void MipsAssembler::DsFsmInstrRf(uint32_t instruction, Register out, FRegister in) {
+ DsFsmInstr(instruction, (1u << out), 0, 0, (1u << in), 0, 0);
+}
+
+void MipsAssembler::DsFsmInstrFr(uint32_t instruction, FRegister out, Register in) {
+ DsFsmInstr(instruction, 0, (1u << in), (1u << out), 0, 0, 0);
+}
+
+void MipsAssembler::DsFsmInstrFR(uint32_t instruction, FRegister in1, Register in2) {
+ DsFsmInstr(instruction, 0, (1u << in2), 0, (1u << in1), 0, 0);
+}
+
+void MipsAssembler::DsFsmInstrCff(uint32_t instruction, int cc_out, FRegister in1, FRegister in2) {
+ DsFsmInstr(instruction, 0, 0, 0, (1u << in1) | (1u << in2), (1 << cc_out), 0);
+}
+
+void MipsAssembler::DsFsmInstrRrrc(uint32_t instruction,
+ Register in1_out,
+ Register in2,
+ int cc_in) {
+ DsFsmInstr(instruction, (1u << in1_out), (1u << in1_out) | (1u << in2), 0, 0, 0, (1 << cc_in));
+}
+
+void MipsAssembler::DsFsmInstrFffc(uint32_t instruction,
+ FRegister in1_out,
+ FRegister in2,
+ int cc_in) {
+ DsFsmInstr(instruction, 0, 0, (1u << in1_out), (1u << in1_out) | (1u << in2), 0, (1 << cc_in));
+}
+
void MipsAssembler::FinalizeCode() {
for (auto& exception_block : exception_blocks_) {
EmitExceptionPoll(&exception_block);
}
+ // Commit the last branch target label (if any) and disable instruction reordering.
+ DsFsmCommitLabel();
+ SetReorder(false);
EmitLiterals();
PromoteBranches();
}
@@ -107,6 +292,12 @@
void MipsAssembler::EmitBranches() {
CHECK(!overwriting_);
+ CHECK(!reordering_);
+ // Now that everything has its final position in the buffer (the branches have
+ // been promoted), adjust the target label PCs.
+ for (size_t cnt = ds_fsm_target_pcs_.size(), i = 0; i < cnt; i++) {
+ ds_fsm_target_pcs_[i] = GetAdjustedPosition(ds_fsm_target_pcs_[i]);
+ }
// Switch from appending instructions at the end of the buffer to overwriting
// existing instructions (branch placeholders) in the buffer.
overwriting_ = true;
@@ -128,7 +319,12 @@
}
}
-void MipsAssembler::EmitR(int opcode, Register rs, Register rt, Register rd, int shamt, int funct) {
+uint32_t MipsAssembler::EmitR(int opcode,
+ Register rs,
+ Register rt,
+ Register rd,
+ int shamt,
+ int funct) {
CHECK_NE(rs, kNoRegister);
CHECK_NE(rt, kNoRegister);
CHECK_NE(rd, kNoRegister);
@@ -139,9 +335,10 @@
shamt << kShamtShift |
funct;
Emit(encoding);
+ return encoding;
}
-void MipsAssembler::EmitI(int opcode, Register rs, Register rt, uint16_t imm) {
+uint32_t MipsAssembler::EmitI(int opcode, Register rs, Register rt, uint16_t imm) {
CHECK_NE(rs, kNoRegister);
CHECK_NE(rt, kNoRegister);
uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
@@ -149,25 +346,32 @@
static_cast<uint32_t>(rt) << kRtShift |
imm;
Emit(encoding);
+ return encoding;
}
-void MipsAssembler::EmitI21(int opcode, Register rs, uint32_t imm21) {
+uint32_t MipsAssembler::EmitI21(int opcode, Register rs, uint32_t imm21) {
CHECK_NE(rs, kNoRegister);
CHECK(IsUint<21>(imm21)) << imm21;
uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
static_cast<uint32_t>(rs) << kRsShift |
imm21;
Emit(encoding);
+ return encoding;
}
-void MipsAssembler::EmitI26(int opcode, uint32_t imm26) {
+uint32_t MipsAssembler::EmitI26(int opcode, uint32_t imm26) {
CHECK(IsUint<26>(imm26)) << imm26;
uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift | imm26;
Emit(encoding);
+ return encoding;
}
-void MipsAssembler::EmitFR(int opcode, int fmt, FRegister ft, FRegister fs, FRegister fd,
- int funct) {
+uint32_t MipsAssembler::EmitFR(int opcode,
+ int fmt,
+ FRegister ft,
+ FRegister fs,
+ FRegister fd,
+ int funct) {
CHECK_NE(ft, kNoFRegister);
CHECK_NE(fs, kNoFRegister);
CHECK_NE(fd, kNoFRegister);
@@ -178,52 +382,54 @@
static_cast<uint32_t>(fd) << kFdShift |
funct;
Emit(encoding);
+ return encoding;
}
-void MipsAssembler::EmitFI(int opcode, int fmt, FRegister ft, uint16_t imm) {
+uint32_t MipsAssembler::EmitFI(int opcode, int fmt, FRegister ft, uint16_t imm) {
CHECK_NE(ft, kNoFRegister);
uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
fmt << kFmtShift |
static_cast<uint32_t>(ft) << kFtShift |
imm;
Emit(encoding);
+ return encoding;
}
void MipsAssembler::Addu(Register rd, Register rs, Register rt) {
- EmitR(0, rs, rt, rd, 0, 0x21);
+ DsFsmInstrRrr(EmitR(0, rs, rt, rd, 0, 0x21), rd, rs, rt);
}
void MipsAssembler::Addiu(Register rt, Register rs, uint16_t imm16) {
- EmitI(0x9, rs, rt, imm16);
+ DsFsmInstrRrr(EmitI(0x9, rs, rt, imm16), rt, rs, rs);
}
void MipsAssembler::Subu(Register rd, Register rs, Register rt) {
- EmitR(0, rs, rt, rd, 0, 0x23);
+ DsFsmInstrRrr(EmitR(0, rs, rt, rd, 0, 0x23), rd, rs, rt);
}
void MipsAssembler::MultR2(Register rs, Register rt) {
CHECK(!IsR6());
- EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x18);
+ DsFsmInstrRrr(EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x18), ZERO, rs, rt);
}
void MipsAssembler::MultuR2(Register rs, Register rt) {
CHECK(!IsR6());
- EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x19);
+ DsFsmInstrRrr(EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x19), ZERO, rs, rt);
}
void MipsAssembler::DivR2(Register rs, Register rt) {
CHECK(!IsR6());
- EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x1a);
+ DsFsmInstrRrr(EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x1a), ZERO, rs, rt);
}
void MipsAssembler::DivuR2(Register rs, Register rt) {
CHECK(!IsR6());
- EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x1b);
+ DsFsmInstrRrr(EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x1b), ZERO, rs, rt);
}
void MipsAssembler::MulR2(Register rd, Register rs, Register rt) {
CHECK(!IsR6());
- EmitR(0x1c, rs, rt, rd, 0, 2);
+ DsFsmInstrRrr(EmitR(0x1c, rs, rt, rd, 0, 2), rd, rs, rt);
}
void MipsAssembler::DivR2(Register rd, Register rs, Register rt) {
@@ -252,308 +458,307 @@
void MipsAssembler::MulR6(Register rd, Register rs, Register rt) {
CHECK(IsR6());
- EmitR(0, rs, rt, rd, 2, 0x18);
+ DsFsmInstrRrr(EmitR(0, rs, rt, rd, 2, 0x18), rd, rs, rt);
}
void MipsAssembler::MuhR6(Register rd, Register rs, Register rt) {
CHECK(IsR6());
- EmitR(0, rs, rt, rd, 3, 0x18);
+ DsFsmInstrRrr(EmitR(0, rs, rt, rd, 3, 0x18), rd, rs, rt);
}
void MipsAssembler::MuhuR6(Register rd, Register rs, Register rt) {
CHECK(IsR6());
- EmitR(0, rs, rt, rd, 3, 0x19);
+ DsFsmInstrRrr(EmitR(0, rs, rt, rd, 3, 0x19), rd, rs, rt);
}
void MipsAssembler::DivR6(Register rd, Register rs, Register rt) {
CHECK(IsR6());
- EmitR(0, rs, rt, rd, 2, 0x1a);
+ DsFsmInstrRrr(EmitR(0, rs, rt, rd, 2, 0x1a), rd, rs, rt);
}
void MipsAssembler::ModR6(Register rd, Register rs, Register rt) {
CHECK(IsR6());
- EmitR(0, rs, rt, rd, 3, 0x1a);
+ DsFsmInstrRrr(EmitR(0, rs, rt, rd, 3, 0x1a), rd, rs, rt);
}
void MipsAssembler::DivuR6(Register rd, Register rs, Register rt) {
CHECK(IsR6());
- EmitR(0, rs, rt, rd, 2, 0x1b);
+ DsFsmInstrRrr(EmitR(0, rs, rt, rd, 2, 0x1b), rd, rs, rt);
}
void MipsAssembler::ModuR6(Register rd, Register rs, Register rt) {
CHECK(IsR6());
- EmitR(0, rs, rt, rd, 3, 0x1b);
+ DsFsmInstrRrr(EmitR(0, rs, rt, rd, 3, 0x1b), rd, rs, rt);
}
void MipsAssembler::And(Register rd, Register rs, Register rt) {
- EmitR(0, rs, rt, rd, 0, 0x24);
+ DsFsmInstrRrr(EmitR(0, rs, rt, rd, 0, 0x24), rd, rs, rt);
}
void MipsAssembler::Andi(Register rt, Register rs, uint16_t imm16) {
- EmitI(0xc, rs, rt, imm16);
+ DsFsmInstrRrr(EmitI(0xc, rs, rt, imm16), rt, rs, rs);
}
void MipsAssembler::Or(Register rd, Register rs, Register rt) {
- EmitR(0, rs, rt, rd, 0, 0x25);
+ DsFsmInstrRrr(EmitR(0, rs, rt, rd, 0, 0x25), rd, rs, rt);
}
void MipsAssembler::Ori(Register rt, Register rs, uint16_t imm16) {
- EmitI(0xd, rs, rt, imm16);
+ DsFsmInstrRrr(EmitI(0xd, rs, rt, imm16), rt, rs, rs);
}
void MipsAssembler::Xor(Register rd, Register rs, Register rt) {
- EmitR(0, rs, rt, rd, 0, 0x26);
+ DsFsmInstrRrr(EmitR(0, rs, rt, rd, 0, 0x26), rd, rs, rt);
}
void MipsAssembler::Xori(Register rt, Register rs, uint16_t imm16) {
- EmitI(0xe, rs, rt, imm16);
+ DsFsmInstrRrr(EmitI(0xe, rs, rt, imm16), rt, rs, rs);
}
void MipsAssembler::Nor(Register rd, Register rs, Register rt) {
- EmitR(0, rs, rt, rd, 0, 0x27);
+ DsFsmInstrRrr(EmitR(0, rs, rt, rd, 0, 0x27), rd, rs, rt);
}
void MipsAssembler::Movz(Register rd, Register rs, Register rt) {
CHECK(!IsR6());
- EmitR(0, rs, rt, rd, 0, 0x0A);
+ DsFsmInstrRrrr(EmitR(0, rs, rt, rd, 0, 0x0A), rd, rs, rt);
}
void MipsAssembler::Movn(Register rd, Register rs, Register rt) {
CHECK(!IsR6());
- EmitR(0, rs, rt, rd, 0, 0x0B);
+ DsFsmInstrRrrr(EmitR(0, rs, rt, rd, 0, 0x0B), rd, rs, rt);
}
void MipsAssembler::Seleqz(Register rd, Register rs, Register rt) {
CHECK(IsR6());
- EmitR(0, rs, rt, rd, 0, 0x35);
+ DsFsmInstrRrr(EmitR(0, rs, rt, rd, 0, 0x35), rd, rs, rt);
}
void MipsAssembler::Selnez(Register rd, Register rs, Register rt) {
CHECK(IsR6());
- EmitR(0, rs, rt, rd, 0, 0x37);
+ DsFsmInstrRrr(EmitR(0, rs, rt, rd, 0, 0x37), rd, rs, rt);
}
void MipsAssembler::ClzR6(Register rd, Register rs) {
CHECK(IsR6());
- EmitR(0, rs, static_cast<Register>(0), rd, 0x01, 0x10);
+ DsFsmInstrRrr(EmitR(0, rs, static_cast<Register>(0), rd, 0x01, 0x10), rd, rs, rs);
}
void MipsAssembler::ClzR2(Register rd, Register rs) {
CHECK(!IsR6());
- EmitR(0x1C, rs, rd, rd, 0, 0x20);
+ DsFsmInstrRrr(EmitR(0x1C, rs, rd, rd, 0, 0x20), rd, rs, rs);
}
void MipsAssembler::CloR6(Register rd, Register rs) {
CHECK(IsR6());
- EmitR(0, rs, static_cast<Register>(0), rd, 0x01, 0x11);
+ DsFsmInstrRrr(EmitR(0, rs, static_cast<Register>(0), rd, 0x01, 0x11), rd, rs, rs);
}
void MipsAssembler::CloR2(Register rd, Register rs) {
CHECK(!IsR6());
- EmitR(0x1C, rs, rd, rd, 0, 0x21);
+ DsFsmInstrRrr(EmitR(0x1C, rs, rd, rd, 0, 0x21), rd, rs, rs);
}
void MipsAssembler::Seb(Register rd, Register rt) {
- EmitR(0x1f, static_cast<Register>(0), rt, rd, 0x10, 0x20);
+ DsFsmInstrRrr(EmitR(0x1f, static_cast<Register>(0), rt, rd, 0x10, 0x20), rd, rt, rt);
}
void MipsAssembler::Seh(Register rd, Register rt) {
- EmitR(0x1f, static_cast<Register>(0), rt, rd, 0x18, 0x20);
+ DsFsmInstrRrr(EmitR(0x1f, static_cast<Register>(0), rt, rd, 0x18, 0x20), rd, rt, rt);
}
void MipsAssembler::Wsbh(Register rd, Register rt) {
- EmitR(0x1f, static_cast<Register>(0), rt, rd, 2, 0x20);
+ DsFsmInstrRrr(EmitR(0x1f, static_cast<Register>(0), rt, rd, 2, 0x20), rd, rt, rt);
}
void MipsAssembler::Bitswap(Register rd, Register rt) {
CHECK(IsR6());
- EmitR(0x1f, static_cast<Register>(0), rt, rd, 0x0, 0x20);
+ DsFsmInstrRrr(EmitR(0x1f, static_cast<Register>(0), rt, rd, 0x0, 0x20), rd, rt, rt);
}
void MipsAssembler::Sll(Register rd, Register rt, int shamt) {
CHECK(IsUint<5>(shamt)) << shamt;
- EmitR(0, static_cast<Register>(0), rt, rd, shamt, 0x00);
+ DsFsmInstrRrr(EmitR(0, static_cast<Register>(0), rt, rd, shamt, 0x00), rd, rt, rt);
}
void MipsAssembler::Srl(Register rd, Register rt, int shamt) {
CHECK(IsUint<5>(shamt)) << shamt;
- EmitR(0, static_cast<Register>(0), rt, rd, shamt, 0x02);
+ DsFsmInstrRrr(EmitR(0, static_cast<Register>(0), rt, rd, shamt, 0x02), rd, rt, rt);
}
void MipsAssembler::Rotr(Register rd, Register rt, int shamt) {
CHECK(IsUint<5>(shamt)) << shamt;
- EmitR(0, static_cast<Register>(1), rt, rd, shamt, 0x02);
+ DsFsmInstrRrr(EmitR(0, static_cast<Register>(1), rt, rd, shamt, 0x02), rd, rt, rt);
}
void MipsAssembler::Sra(Register rd, Register rt, int shamt) {
CHECK(IsUint<5>(shamt)) << shamt;
- EmitR(0, static_cast<Register>(0), rt, rd, shamt, 0x03);
+ DsFsmInstrRrr(EmitR(0, static_cast<Register>(0), rt, rd, shamt, 0x03), rd, rt, rt);
}
void MipsAssembler::Sllv(Register rd, Register rt, Register rs) {
- EmitR(0, rs, rt, rd, 0, 0x04);
+ DsFsmInstrRrr(EmitR(0, rs, rt, rd, 0, 0x04), rd, rs, rt);
}
void MipsAssembler::Srlv(Register rd, Register rt, Register rs) {
- EmitR(0, rs, rt, rd, 0, 0x06);
+ DsFsmInstrRrr(EmitR(0, rs, rt, rd, 0, 0x06), rd, rs, rt);
}
void MipsAssembler::Rotrv(Register rd, Register rt, Register rs) {
- EmitR(0, rs, rt, rd, 1, 0x06);
+ DsFsmInstrRrr(EmitR(0, rs, rt, rd, 1, 0x06), rd, rs, rt);
}
void MipsAssembler::Srav(Register rd, Register rt, Register rs) {
- EmitR(0, rs, rt, rd, 0, 0x07);
+ DsFsmInstrRrr(EmitR(0, rs, rt, rd, 0, 0x07), rd, rs, rt);
}
void MipsAssembler::Ext(Register rd, Register rt, int pos, int size) {
CHECK(IsUint<5>(pos)) << pos;
CHECK(0 < size && size <= 32) << size;
CHECK(0 < pos + size && pos + size <= 32) << pos << " + " << size;
- EmitR(0x1f, rt, rd, static_cast<Register>(size - 1), pos, 0x00);
+ DsFsmInstrRrr(EmitR(0x1f, rt, rd, static_cast<Register>(size - 1), pos, 0x00), rd, rt, rt);
}
void MipsAssembler::Ins(Register rd, Register rt, int pos, int size) {
CHECK(IsUint<5>(pos)) << pos;
CHECK(0 < size && size <= 32) << size;
CHECK(0 < pos + size && pos + size <= 32) << pos << " + " << size;
- EmitR(0x1f, rt, rd, static_cast<Register>(pos + size - 1), pos, 0x04);
+ DsFsmInstrRrr(EmitR(0x1f, rt, rd, static_cast<Register>(pos + size - 1), pos, 0x04), rd, rd, rt);
}
void MipsAssembler::Lb(Register rt, Register rs, uint16_t imm16) {
- EmitI(0x20, rs, rt, imm16);
+ DsFsmInstrRrr(EmitI(0x20, rs, rt, imm16), rt, rs, rs);
}
void MipsAssembler::Lh(Register rt, Register rs, uint16_t imm16) {
- EmitI(0x21, rs, rt, imm16);
+ DsFsmInstrRrr(EmitI(0x21, rs, rt, imm16), rt, rs, rs);
}
void MipsAssembler::Lw(Register rt, Register rs, uint16_t imm16) {
- EmitI(0x23, rs, rt, imm16);
+ DsFsmInstrRrr(EmitI(0x23, rs, rt, imm16), rt, rs, rs);
}
void MipsAssembler::Lwl(Register rt, Register rs, uint16_t imm16) {
CHECK(!IsR6());
- EmitI(0x22, rs, rt, imm16);
+ DsFsmInstrRrr(EmitI(0x22, rs, rt, imm16), rt, rt, rs);
}
void MipsAssembler::Lwr(Register rt, Register rs, uint16_t imm16) {
CHECK(!IsR6());
- EmitI(0x26, rs, rt, imm16);
+ DsFsmInstrRrr(EmitI(0x26, rs, rt, imm16), rt, rt, rs);
}
void MipsAssembler::Lbu(Register rt, Register rs, uint16_t imm16) {
- EmitI(0x24, rs, rt, imm16);
+ DsFsmInstrRrr(EmitI(0x24, rs, rt, imm16), rt, rs, rs);
}
void MipsAssembler::Lhu(Register rt, Register rs, uint16_t imm16) {
- EmitI(0x25, rs, rt, imm16);
+ DsFsmInstrRrr(EmitI(0x25, rs, rt, imm16), rt, rs, rs);
}
void MipsAssembler::Lwpc(Register rs, uint32_t imm19) {
CHECK(IsR6());
CHECK(IsUint<19>(imm19)) << imm19;
- EmitI21(0x3B, rs, (0x01 << 19) | imm19);
+ DsFsmInstrNop(EmitI21(0x3B, rs, (0x01 << 19) | imm19));
}
void MipsAssembler::Lui(Register rt, uint16_t imm16) {
- EmitI(0xf, static_cast<Register>(0), rt, imm16);
+ DsFsmInstrRrr(EmitI(0xf, static_cast<Register>(0), rt, imm16), rt, ZERO, ZERO);
}
void MipsAssembler::Aui(Register rt, Register rs, uint16_t imm16) {
CHECK(IsR6());
- EmitI(0xf, rs, rt, imm16);
+ DsFsmInstrRrr(EmitI(0xf, rs, rt, imm16), rt, rt, rs);
}
void MipsAssembler::Sync(uint32_t stype) {
- EmitR(0, static_cast<Register>(0), static_cast<Register>(0), static_cast<Register>(0),
- stype & 0x1f, 0xf);
+ DsFsmInstrNop(EmitR(0, ZERO, ZERO, ZERO, stype & 0x1f, 0xf));
}
void MipsAssembler::Mfhi(Register rd) {
CHECK(!IsR6());
- EmitR(0, static_cast<Register>(0), static_cast<Register>(0), rd, 0, 0x10);
+ DsFsmInstrRrr(EmitR(0, ZERO, ZERO, rd, 0, 0x10), rd, ZERO, ZERO);
}
void MipsAssembler::Mflo(Register rd) {
CHECK(!IsR6());
- EmitR(0, static_cast<Register>(0), static_cast<Register>(0), rd, 0, 0x12);
+ DsFsmInstrRrr(EmitR(0, ZERO, ZERO, rd, 0, 0x12), rd, ZERO, ZERO);
}
void MipsAssembler::Sb(Register rt, Register rs, uint16_t imm16) {
- EmitI(0x28, rs, rt, imm16);
+ DsFsmInstrRrr(EmitI(0x28, rs, rt, imm16), ZERO, rt, rs);
}
void MipsAssembler::Sh(Register rt, Register rs, uint16_t imm16) {
- EmitI(0x29, rs, rt, imm16);
+ DsFsmInstrRrr(EmitI(0x29, rs, rt, imm16), ZERO, rt, rs);
}
void MipsAssembler::Sw(Register rt, Register rs, uint16_t imm16) {
- EmitI(0x2b, rs, rt, imm16);
+ DsFsmInstrRrr(EmitI(0x2b, rs, rt, imm16), ZERO, rt, rs);
}
void MipsAssembler::Swl(Register rt, Register rs, uint16_t imm16) {
CHECK(!IsR6());
- EmitI(0x2a, rs, rt, imm16);
+ DsFsmInstrRrr(EmitI(0x2a, rs, rt, imm16), ZERO, rt, rs);
}
void MipsAssembler::Swr(Register rt, Register rs, uint16_t imm16) {
CHECK(!IsR6());
- EmitI(0x2e, rs, rt, imm16);
+ DsFsmInstrRrr(EmitI(0x2e, rs, rt, imm16), ZERO, rt, rs);
}
void MipsAssembler::LlR2(Register rt, Register base, int16_t imm16) {
CHECK(!IsR6());
- EmitI(0x30, base, rt, imm16);
+ DsFsmInstrRrr(EmitI(0x30, base, rt, imm16), rt, base, base);
}
void MipsAssembler::ScR2(Register rt, Register base, int16_t imm16) {
CHECK(!IsR6());
- EmitI(0x38, base, rt, imm16);
+ DsFsmInstrRrr(EmitI(0x38, base, rt, imm16), rt, rt, base);
}
void MipsAssembler::LlR6(Register rt, Register base, int16_t imm9) {
CHECK(IsR6());
CHECK(IsInt<9>(imm9));
- EmitI(0x1f, base, rt, ((imm9 & 0x1ff) << 7) | 0x36);
+ DsFsmInstrRrr(EmitI(0x1f, base, rt, ((imm9 & 0x1ff) << 7) | 0x36), rt, base, base);
}
void MipsAssembler::ScR6(Register rt, Register base, int16_t imm9) {
CHECK(IsR6());
CHECK(IsInt<9>(imm9));
- EmitI(0x1f, base, rt, ((imm9 & 0x1ff) << 7) | 0x26);
+ DsFsmInstrRrr(EmitI(0x1f, base, rt, ((imm9 & 0x1ff) << 7) | 0x26), rt, rt, base);
}
void MipsAssembler::Slt(Register rd, Register rs, Register rt) {
- EmitR(0, rs, rt, rd, 0, 0x2a);
+ DsFsmInstrRrr(EmitR(0, rs, rt, rd, 0, 0x2a), rd, rs, rt);
}
void MipsAssembler::Sltu(Register rd, Register rs, Register rt) {
- EmitR(0, rs, rt, rd, 0, 0x2b);
+ DsFsmInstrRrr(EmitR(0, rs, rt, rd, 0, 0x2b), rd, rs, rt);
}
void MipsAssembler::Slti(Register rt, Register rs, uint16_t imm16) {
- EmitI(0xa, rs, rt, imm16);
+ DsFsmInstrRrr(EmitI(0xa, rs, rt, imm16), rt, rs, rs);
}
void MipsAssembler::Sltiu(Register rt, Register rs, uint16_t imm16) {
- EmitI(0xb, rs, rt, imm16);
+ DsFsmInstrRrr(EmitI(0xb, rs, rt, imm16), rt, rs, rs);
}
void MipsAssembler::B(uint16_t imm16) {
- EmitI(0x4, static_cast<Register>(0), static_cast<Register>(0), imm16);
+ DsFsmInstrNop(EmitI(0x4, static_cast<Register>(0), static_cast<Register>(0), imm16));
}
void MipsAssembler::Bal(uint16_t imm16) {
- EmitI(0x1, static_cast<Register>(0), static_cast<Register>(0x11), imm16);
+ DsFsmInstrNop(EmitI(0x1, static_cast<Register>(0), static_cast<Register>(0x11), imm16));
}
void MipsAssembler::Beq(Register rs, Register rt, uint16_t imm16) {
- EmitI(0x4, rs, rt, imm16);
+ DsFsmInstrNop(EmitI(0x4, rs, rt, imm16));
}
void MipsAssembler::Bne(Register rs, Register rt, uint16_t imm16) {
- EmitI(0x5, rs, rt, imm16);
+ DsFsmInstrNop(EmitI(0x5, rs, rt, imm16));
}
void MipsAssembler::Beqz(Register rt, uint16_t imm16) {
@@ -565,19 +770,19 @@
}
void MipsAssembler::Bltz(Register rt, uint16_t imm16) {
- EmitI(0x1, rt, static_cast<Register>(0), imm16);
+ DsFsmInstrNop(EmitI(0x1, rt, static_cast<Register>(0), imm16));
}
void MipsAssembler::Bgez(Register rt, uint16_t imm16) {
- EmitI(0x1, rt, static_cast<Register>(0x1), imm16);
+ DsFsmInstrNop(EmitI(0x1, rt, static_cast<Register>(0x1), imm16));
}
void MipsAssembler::Blez(Register rt, uint16_t imm16) {
- EmitI(0x6, rt, static_cast<Register>(0), imm16);
+ DsFsmInstrNop(EmitI(0x6, rt, static_cast<Register>(0), imm16));
}
void MipsAssembler::Bgtz(Register rt, uint16_t imm16) {
- EmitI(0x7, rt, static_cast<Register>(0), imm16);
+ DsFsmInstrNop(EmitI(0x7, rt, static_cast<Register>(0), imm16));
}
void MipsAssembler::Bc1f(uint16_t imm16) {
@@ -587,7 +792,7 @@
void MipsAssembler::Bc1f(int cc, uint16_t imm16) {
CHECK(!IsR6());
CHECK(IsUint<3>(cc)) << cc;
- EmitI(0x11, static_cast<Register>(0x8), static_cast<Register>(cc << 2), imm16);
+ DsFsmInstrNop(EmitI(0x11, static_cast<Register>(0x8), static_cast<Register>(cc << 2), imm16));
}
void MipsAssembler::Bc1t(uint16_t imm16) {
@@ -597,19 +802,45 @@
void MipsAssembler::Bc1t(int cc, uint16_t imm16) {
CHECK(!IsR6());
CHECK(IsUint<3>(cc)) << cc;
- EmitI(0x11, static_cast<Register>(0x8), static_cast<Register>((cc << 2) | 1), imm16);
+ DsFsmInstrNop(EmitI(0x11,
+ static_cast<Register>(0x8),
+ static_cast<Register>((cc << 2) | 1),
+ imm16));
}
void MipsAssembler::J(uint32_t addr26) {
- EmitI26(0x2, addr26);
+ DsFsmInstrNop(EmitI26(0x2, addr26));
}
void MipsAssembler::Jal(uint32_t addr26) {
- EmitI26(0x3, addr26);
+ DsFsmInstrNop(EmitI26(0x3, addr26));
}
void MipsAssembler::Jalr(Register rd, Register rs) {
- EmitR(0, rs, static_cast<Register>(0), rd, 0, 0x09);
+ uint32_t last_instruction = delay_slot_.instruction_;
+ bool exchange = (last_instruction != 0 &&
+ (delay_slot_.gpr_outs_mask_ & (1u << rs)) == 0 &&
+ ((delay_slot_.gpr_ins_mask_ | delay_slot_.gpr_outs_mask_) & (1u << rd)) == 0);
+ if (exchange) {
+ // The last instruction cannot be used in a different delay slot;
+ // do not commit the label before it (if any).
+ DsFsmDropLabel();
+ }
+ DsFsmInstrNop(EmitR(0, rs, static_cast<Register>(0), rd, 0, 0x09));
+ if (exchange) {
+ // Exchange the last two instructions in the assembler buffer.
+ size_t size = buffer_.Size();
+ CHECK_GE(size, 2 * sizeof(uint32_t));
+ size_t pos1 = size - 2 * sizeof(uint32_t);
+ size_t pos2 = size - sizeof(uint32_t);
+ uint32_t instr1 = buffer_.Load<uint32_t>(pos1);
+ uint32_t instr2 = buffer_.Load<uint32_t>(pos2);
+ CHECK_EQ(instr1, last_instruction);
+ buffer_.Store<uint32_t>(pos1, instr2);
+ buffer_.Store<uint32_t>(pos2, instr1);
+ } else if (reordering_) {
+ Nop();
+ }
}
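The exchange test at the top of Jalr() is the core of the delay-slot fill: the previously emitted instruction may be swapped behind the jump only if it neither writes the jump-target register nor touches the return-address register. A stand-alone restatement of that predicate (hypothetical function name; the masks correspond to DelaySlot's gpr_outs_mask_ and gpr_ins_mask_):

#include <cstdint>

bool CanMoveIntoJalrDelaySlot(uint32_t candidate_encoding,
                              uint32_t candidate_gpr_outs,
                              uint32_t candidate_gpr_ins,
                              int jump_target_reg,    // rs
                              int return_addr_reg) {  // rd
  if (candidate_encoding == 0u) {
    return false;  // No suitable instruction recorded (e.g. a label intervened).
  }
  const uint32_t rs_bit = 1u << jump_target_reg;
  const uint32_t rd_bit = 1u << return_addr_reg;
  // The candidate must not clobber the jump target and must not read or write
  // the register that will receive the return address.
  return (candidate_gpr_outs & rs_bit) == 0u &&
         ((candidate_gpr_ins | candidate_gpr_outs) & rd_bit) == 0u;
}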
void MipsAssembler::Jalr(Register rs) {
@@ -621,38 +852,38 @@
}
void MipsAssembler::Nal() {
- EmitI(0x1, static_cast<Register>(0), static_cast<Register>(0x10), 0);
+ DsFsmInstrNop(EmitI(0x1, static_cast<Register>(0), static_cast<Register>(0x10), 0));
}
void MipsAssembler::Auipc(Register rs, uint16_t imm16) {
CHECK(IsR6());
- EmitI(0x3B, rs, static_cast<Register>(0x1E), imm16);
+ DsFsmInstrNop(EmitI(0x3B, rs, static_cast<Register>(0x1E), imm16));
}
void MipsAssembler::Addiupc(Register rs, uint32_t imm19) {
CHECK(IsR6());
CHECK(IsUint<19>(imm19)) << imm19;
- EmitI21(0x3B, rs, imm19);
+ DsFsmInstrNop(EmitI21(0x3B, rs, imm19));
}
void MipsAssembler::Bc(uint32_t imm26) {
CHECK(IsR6());
- EmitI26(0x32, imm26);
+ DsFsmInstrNop(EmitI26(0x32, imm26));
}
void MipsAssembler::Balc(uint32_t imm26) {
CHECK(IsR6());
- EmitI26(0x3A, imm26);
+ DsFsmInstrNop(EmitI26(0x3A, imm26));
}
void MipsAssembler::Jic(Register rt, uint16_t imm16) {
CHECK(IsR6());
- EmitI(0x36, static_cast<Register>(0), rt, imm16);
+ DsFsmInstrNop(EmitI(0x36, static_cast<Register>(0), rt, imm16));
}
void MipsAssembler::Jialc(Register rt, uint16_t imm16) {
CHECK(IsR6());
- EmitI(0x3E, static_cast<Register>(0), rt, imm16);
+ DsFsmInstrNop(EmitI(0x3E, static_cast<Register>(0), rt, imm16));
}
void MipsAssembler::Bltc(Register rs, Register rt, uint16_t imm16) {
@@ -660,19 +891,19 @@
CHECK_NE(rs, ZERO);
CHECK_NE(rt, ZERO);
CHECK_NE(rs, rt);
- EmitI(0x17, rs, rt, imm16);
+ DsFsmInstrNop(EmitI(0x17, rs, rt, imm16));
}
void MipsAssembler::Bltzc(Register rt, uint16_t imm16) {
CHECK(IsR6());
CHECK_NE(rt, ZERO);
- EmitI(0x17, rt, rt, imm16);
+ DsFsmInstrNop(EmitI(0x17, rt, rt, imm16));
}
void MipsAssembler::Bgtzc(Register rt, uint16_t imm16) {
CHECK(IsR6());
CHECK_NE(rt, ZERO);
- EmitI(0x17, static_cast<Register>(0), rt, imm16);
+ DsFsmInstrNop(EmitI(0x17, static_cast<Register>(0), rt, imm16));
}
void MipsAssembler::Bgec(Register rs, Register rt, uint16_t imm16) {
@@ -680,19 +911,19 @@
CHECK_NE(rs, ZERO);
CHECK_NE(rt, ZERO);
CHECK_NE(rs, rt);
- EmitI(0x16, rs, rt, imm16);
+ DsFsmInstrNop(EmitI(0x16, rs, rt, imm16));
}
void MipsAssembler::Bgezc(Register rt, uint16_t imm16) {
CHECK(IsR6());
CHECK_NE(rt, ZERO);
- EmitI(0x16, rt, rt, imm16);
+ DsFsmInstrNop(EmitI(0x16, rt, rt, imm16));
}
void MipsAssembler::Blezc(Register rt, uint16_t imm16) {
CHECK(IsR6());
CHECK_NE(rt, ZERO);
- EmitI(0x16, static_cast<Register>(0), rt, imm16);
+ DsFsmInstrNop(EmitI(0x16, static_cast<Register>(0), rt, imm16));
}
void MipsAssembler::Bltuc(Register rs, Register rt, uint16_t imm16) {
@@ -700,7 +931,7 @@
CHECK_NE(rs, ZERO);
CHECK_NE(rt, ZERO);
CHECK_NE(rs, rt);
- EmitI(0x7, rs, rt, imm16);
+ DsFsmInstrNop(EmitI(0x7, rs, rt, imm16));
}
void MipsAssembler::Bgeuc(Register rs, Register rt, uint16_t imm16) {
@@ -708,7 +939,7 @@
CHECK_NE(rs, ZERO);
CHECK_NE(rt, ZERO);
CHECK_NE(rs, rt);
- EmitI(0x6, rs, rt, imm16);
+ DsFsmInstrNop(EmitI(0x6, rs, rt, imm16));
}
void MipsAssembler::Beqc(Register rs, Register rt, uint16_t imm16) {
@@ -716,7 +947,7 @@
CHECK_NE(rs, ZERO);
CHECK_NE(rt, ZERO);
CHECK_NE(rs, rt);
- EmitI(0x8, std::min(rs, rt), std::max(rs, rt), imm16);
+ DsFsmInstrNop(EmitI(0x8, std::min(rs, rt), std::max(rs, rt), imm16));
}
void MipsAssembler::Bnec(Register rs, Register rt, uint16_t imm16) {
@@ -724,29 +955,29 @@
CHECK_NE(rs, ZERO);
CHECK_NE(rt, ZERO);
CHECK_NE(rs, rt);
- EmitI(0x18, std::min(rs, rt), std::max(rs, rt), imm16);
+ DsFsmInstrNop(EmitI(0x18, std::min(rs, rt), std::max(rs, rt), imm16));
}
void MipsAssembler::Beqzc(Register rs, uint32_t imm21) {
CHECK(IsR6());
CHECK_NE(rs, ZERO);
- EmitI21(0x36, rs, imm21);
+ DsFsmInstrNop(EmitI21(0x36, rs, imm21));
}
void MipsAssembler::Bnezc(Register rs, uint32_t imm21) {
CHECK(IsR6());
CHECK_NE(rs, ZERO);
- EmitI21(0x3E, rs, imm21);
+ DsFsmInstrNop(EmitI21(0x3E, rs, imm21));
}
void MipsAssembler::Bc1eqz(FRegister ft, uint16_t imm16) {
CHECK(IsR6());
- EmitFI(0x11, 0x9, ft, imm16);
+ DsFsmInstrNop(EmitFI(0x11, 0x9, ft, imm16));
}
void MipsAssembler::Bc1nez(FRegister ft, uint16_t imm16) {
CHECK(IsR6());
- EmitFI(0x11, 0xD, ft, imm16);
+ DsFsmInstrNop(EmitFI(0x11, 0xD, ft, imm16));
}
void MipsAssembler::EmitBcondR2(BranchCondition cond, Register rs, Register rt, uint16_t imm16) {
@@ -868,67 +1099,67 @@
}
void MipsAssembler::AddS(FRegister fd, FRegister fs, FRegister ft) {
- EmitFR(0x11, 0x10, ft, fs, fd, 0x0);
+ DsFsmInstrFff(EmitFR(0x11, 0x10, ft, fs, fd, 0x0), fd, fs, ft);
}
void MipsAssembler::SubS(FRegister fd, FRegister fs, FRegister ft) {
- EmitFR(0x11, 0x10, ft, fs, fd, 0x1);
+ DsFsmInstrFff(EmitFR(0x11, 0x10, ft, fs, fd, 0x1), fd, fs, ft);
}
void MipsAssembler::MulS(FRegister fd, FRegister fs, FRegister ft) {
- EmitFR(0x11, 0x10, ft, fs, fd, 0x2);
+ DsFsmInstrFff(EmitFR(0x11, 0x10, ft, fs, fd, 0x2), fd, fs, ft);
}
void MipsAssembler::DivS(FRegister fd, FRegister fs, FRegister ft) {
- EmitFR(0x11, 0x10, ft, fs, fd, 0x3);
+ DsFsmInstrFff(EmitFR(0x11, 0x10, ft, fs, fd, 0x3), fd, fs, ft);
}
void MipsAssembler::AddD(FRegister fd, FRegister fs, FRegister ft) {
- EmitFR(0x11, 0x11, ft, fs, fd, 0x0);
+ DsFsmInstrFff(EmitFR(0x11, 0x11, ft, fs, fd, 0x0), fd, fs, ft);
}
void MipsAssembler::SubD(FRegister fd, FRegister fs, FRegister ft) {
- EmitFR(0x11, 0x11, ft, fs, fd, 0x1);
+ DsFsmInstrFff(EmitFR(0x11, 0x11, ft, fs, fd, 0x1), fd, fs, ft);
}
void MipsAssembler::MulD(FRegister fd, FRegister fs, FRegister ft) {
- EmitFR(0x11, 0x11, ft, fs, fd, 0x2);
+ DsFsmInstrFff(EmitFR(0x11, 0x11, ft, fs, fd, 0x2), fd, fs, ft);
}
void MipsAssembler::DivD(FRegister fd, FRegister fs, FRegister ft) {
- EmitFR(0x11, 0x11, ft, fs, fd, 0x3);
+ DsFsmInstrFff(EmitFR(0x11, 0x11, ft, fs, fd, 0x3), fd, fs, ft);
}
void MipsAssembler::SqrtS(FRegister fd, FRegister fs) {
- EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x4);
+ DsFsmInstrFff(EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x4), fd, fs, fs);
}
void MipsAssembler::SqrtD(FRegister fd, FRegister fs) {
- EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x4);
+ DsFsmInstrFff(EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x4), fd, fs, fs);
}
void MipsAssembler::AbsS(FRegister fd, FRegister fs) {
- EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x5);
+ DsFsmInstrFff(EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x5), fd, fs, fs);
}
void MipsAssembler::AbsD(FRegister fd, FRegister fs) {
- EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x5);
+ DsFsmInstrFff(EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x5), fd, fs, fs);
}
void MipsAssembler::MovS(FRegister fd, FRegister fs) {
- EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x6);
+ DsFsmInstrFff(EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x6), fd, fs, fs);
}
void MipsAssembler::MovD(FRegister fd, FRegister fs) {
- EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x6);
+ DsFsmInstrFff(EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x6), fd, fs, fs);
}
void MipsAssembler::NegS(FRegister fd, FRegister fs) {
- EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x7);
+ DsFsmInstrFff(EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x7), fd, fs, fs);
}
void MipsAssembler::NegD(FRegister fd, FRegister fs) {
- EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x7);
+ DsFsmInstrFff(EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x7), fd, fs, fs);
}
void MipsAssembler::CunS(FRegister fs, FRegister ft) {
@@ -938,7 +1169,7 @@
void MipsAssembler::CunS(int cc, FRegister fs, FRegister ft) {
CHECK(!IsR6());
CHECK(IsUint<3>(cc)) << cc;
- EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x31);
+ DsFsmInstrCff(EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x31), cc, fs, ft);
}
void MipsAssembler::CeqS(FRegister fs, FRegister ft) {
@@ -948,7 +1179,7 @@
void MipsAssembler::CeqS(int cc, FRegister fs, FRegister ft) {
CHECK(!IsR6());
CHECK(IsUint<3>(cc)) << cc;
- EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x32);
+ DsFsmInstrCff(EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x32), cc, fs, ft);
}
void MipsAssembler::CueqS(FRegister fs, FRegister ft) {
@@ -958,7 +1189,7 @@
void MipsAssembler::CueqS(int cc, FRegister fs, FRegister ft) {
CHECK(!IsR6());
CHECK(IsUint<3>(cc)) << cc;
- EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x33);
+ DsFsmInstrCff(EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x33), cc, fs, ft);
}
void MipsAssembler::ColtS(FRegister fs, FRegister ft) {
@@ -968,7 +1199,7 @@
void MipsAssembler::ColtS(int cc, FRegister fs, FRegister ft) {
CHECK(!IsR6());
CHECK(IsUint<3>(cc)) << cc;
- EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x34);
+ DsFsmInstrCff(EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x34), cc, fs, ft);
}
void MipsAssembler::CultS(FRegister fs, FRegister ft) {
@@ -978,7 +1209,7 @@
void MipsAssembler::CultS(int cc, FRegister fs, FRegister ft) {
CHECK(!IsR6());
CHECK(IsUint<3>(cc)) << cc;
- EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x35);
+ DsFsmInstrCff(EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x35), cc, fs, ft);
}
void MipsAssembler::ColeS(FRegister fs, FRegister ft) {
@@ -988,7 +1219,7 @@
void MipsAssembler::ColeS(int cc, FRegister fs, FRegister ft) {
CHECK(!IsR6());
CHECK(IsUint<3>(cc)) << cc;
- EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x36);
+ DsFsmInstrCff(EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x36), cc, fs, ft);
}
void MipsAssembler::CuleS(FRegister fs, FRegister ft) {
@@ -998,7 +1229,7 @@
void MipsAssembler::CuleS(int cc, FRegister fs, FRegister ft) {
CHECK(!IsR6());
CHECK(IsUint<3>(cc)) << cc;
- EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x37);
+ DsFsmInstrCff(EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x37), cc, fs, ft);
}
void MipsAssembler::CunD(FRegister fs, FRegister ft) {
@@ -1008,7 +1239,7 @@
void MipsAssembler::CunD(int cc, FRegister fs, FRegister ft) {
CHECK(!IsR6());
CHECK(IsUint<3>(cc)) << cc;
- EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x31);
+ DsFsmInstrCff(EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x31), cc, fs, ft);
}
void MipsAssembler::CeqD(FRegister fs, FRegister ft) {
@@ -1018,7 +1249,7 @@
void MipsAssembler::CeqD(int cc, FRegister fs, FRegister ft) {
CHECK(!IsR6());
CHECK(IsUint<3>(cc)) << cc;
- EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x32);
+ DsFsmInstrCff(EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x32), cc, fs, ft);
}
void MipsAssembler::CueqD(FRegister fs, FRegister ft) {
@@ -1028,7 +1259,7 @@
void MipsAssembler::CueqD(int cc, FRegister fs, FRegister ft) {
CHECK(!IsR6());
CHECK(IsUint<3>(cc)) << cc;
- EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x33);
+ DsFsmInstrCff(EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x33), cc, fs, ft);
}
void MipsAssembler::ColtD(FRegister fs, FRegister ft) {
@@ -1038,7 +1269,7 @@
void MipsAssembler::ColtD(int cc, FRegister fs, FRegister ft) {
CHECK(!IsR6());
CHECK(IsUint<3>(cc)) << cc;
- EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x34);
+ DsFsmInstrCff(EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x34), cc, fs, ft);
}
void MipsAssembler::CultD(FRegister fs, FRegister ft) {
@@ -1048,7 +1279,7 @@
void MipsAssembler::CultD(int cc, FRegister fs, FRegister ft) {
CHECK(!IsR6());
CHECK(IsUint<3>(cc)) << cc;
- EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x35);
+ DsFsmInstrCff(EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x35), cc, fs, ft);
}
void MipsAssembler::ColeD(FRegister fs, FRegister ft) {
@@ -1058,7 +1289,7 @@
void MipsAssembler::ColeD(int cc, FRegister fs, FRegister ft) {
CHECK(!IsR6());
CHECK(IsUint<3>(cc)) << cc;
- EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x36);
+ DsFsmInstrCff(EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x36), cc, fs, ft);
}
void MipsAssembler::CuleD(FRegister fs, FRegister ft) {
@@ -1068,247 +1299,261 @@
void MipsAssembler::CuleD(int cc, FRegister fs, FRegister ft) {
CHECK(!IsR6());
CHECK(IsUint<3>(cc)) << cc;
- EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x37);
+ DsFsmInstrCff(EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x37), cc, fs, ft);
}
void MipsAssembler::CmpUnS(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x14, ft, fs, fd, 0x01);
+ DsFsmInstrFff(EmitFR(0x11, 0x14, ft, fs, fd, 0x01), fd, fs, ft);
}
void MipsAssembler::CmpEqS(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x14, ft, fs, fd, 0x02);
+ DsFsmInstrFff(EmitFR(0x11, 0x14, ft, fs, fd, 0x02), fd, fs, ft);
}
void MipsAssembler::CmpUeqS(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x14, ft, fs, fd, 0x03);
+ DsFsmInstrFff(EmitFR(0x11, 0x14, ft, fs, fd, 0x03), fd, fs, ft);
}
void MipsAssembler::CmpLtS(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x14, ft, fs, fd, 0x04);
+ DsFsmInstrFff(EmitFR(0x11, 0x14, ft, fs, fd, 0x04), fd, fs, ft);
}
void MipsAssembler::CmpUltS(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x14, ft, fs, fd, 0x05);
+ DsFsmInstrFff(EmitFR(0x11, 0x14, ft, fs, fd, 0x05), fd, fs, ft);
}
void MipsAssembler::CmpLeS(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x14, ft, fs, fd, 0x06);
+ DsFsmInstrFff(EmitFR(0x11, 0x14, ft, fs, fd, 0x06), fd, fs, ft);
}
void MipsAssembler::CmpUleS(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x14, ft, fs, fd, 0x07);
+ DsFsmInstrFff(EmitFR(0x11, 0x14, ft, fs, fd, 0x07), fd, fs, ft);
}
void MipsAssembler::CmpOrS(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x14, ft, fs, fd, 0x11);
+ DsFsmInstrFff(EmitFR(0x11, 0x14, ft, fs, fd, 0x11), fd, fs, ft);
}
void MipsAssembler::CmpUneS(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x14, ft, fs, fd, 0x12);
+ DsFsmInstrFff(EmitFR(0x11, 0x14, ft, fs, fd, 0x12), fd, fs, ft);
}
void MipsAssembler::CmpNeS(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x14, ft, fs, fd, 0x13);
+ DsFsmInstrFff(EmitFR(0x11, 0x14, ft, fs, fd, 0x13), fd, fs, ft);
}
void MipsAssembler::CmpUnD(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x15, ft, fs, fd, 0x01);
+ DsFsmInstrFff(EmitFR(0x11, 0x15, ft, fs, fd, 0x01), fd, fs, ft);
}
void MipsAssembler::CmpEqD(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x15, ft, fs, fd, 0x02);
+ DsFsmInstrFff(EmitFR(0x11, 0x15, ft, fs, fd, 0x02), fd, fs, ft);
}
void MipsAssembler::CmpUeqD(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x15, ft, fs, fd, 0x03);
+ DsFsmInstrFff(EmitFR(0x11, 0x15, ft, fs, fd, 0x03), fd, fs, ft);
}
void MipsAssembler::CmpLtD(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x15, ft, fs, fd, 0x04);
+ DsFsmInstrFff(EmitFR(0x11, 0x15, ft, fs, fd, 0x04), fd, fs, ft);
}
void MipsAssembler::CmpUltD(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x15, ft, fs, fd, 0x05);
+ DsFsmInstrFff(EmitFR(0x11, 0x15, ft, fs, fd, 0x05), fd, fs, ft);
}
void MipsAssembler::CmpLeD(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x15, ft, fs, fd, 0x06);
+ DsFsmInstrFff(EmitFR(0x11, 0x15, ft, fs, fd, 0x06), fd, fs, ft);
}
void MipsAssembler::CmpUleD(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x15, ft, fs, fd, 0x07);
+ DsFsmInstrFff(EmitFR(0x11, 0x15, ft, fs, fd, 0x07), fd, fs, ft);
}
void MipsAssembler::CmpOrD(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x15, ft, fs, fd, 0x11);
+ DsFsmInstrFff(EmitFR(0x11, 0x15, ft, fs, fd, 0x11), fd, fs, ft);
}
void MipsAssembler::CmpUneD(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x15, ft, fs, fd, 0x12);
+ DsFsmInstrFff(EmitFR(0x11, 0x15, ft, fs, fd, 0x12), fd, fs, ft);
}
void MipsAssembler::CmpNeD(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x15, ft, fs, fd, 0x13);
+ DsFsmInstrFff(EmitFR(0x11, 0x15, ft, fs, fd, 0x13), fd, fs, ft);
}
void MipsAssembler::Movf(Register rd, Register rs, int cc) {
CHECK(!IsR6());
CHECK(IsUint<3>(cc)) << cc;
- EmitR(0, rs, static_cast<Register>(cc << 2), rd, 0, 0x01);
+ DsFsmInstrRrrc(EmitR(0, rs, static_cast<Register>(cc << 2), rd, 0, 0x01), rd, rs, cc);
}
void MipsAssembler::Movt(Register rd, Register rs, int cc) {
CHECK(!IsR6());
CHECK(IsUint<3>(cc)) << cc;
- EmitR(0, rs, static_cast<Register>((cc << 2) | 1), rd, 0, 0x01);
+ DsFsmInstrRrrc(EmitR(0, rs, static_cast<Register>((cc << 2) | 1), rd, 0, 0x01), rd, rs, cc);
}
void MipsAssembler::MovfS(FRegister fd, FRegister fs, int cc) {
CHECK(!IsR6());
CHECK(IsUint<3>(cc)) << cc;
- EmitFR(0x11, 0x10, static_cast<FRegister>(cc << 2), fs, fd, 0x11);
+ DsFsmInstrFffc(EmitFR(0x11, 0x10, static_cast<FRegister>(cc << 2), fs, fd, 0x11), fd, fs, cc);
}
void MipsAssembler::MovfD(FRegister fd, FRegister fs, int cc) {
CHECK(!IsR6());
CHECK(IsUint<3>(cc)) << cc;
- EmitFR(0x11, 0x11, static_cast<FRegister>(cc << 2), fs, fd, 0x11);
+ DsFsmInstrFffc(EmitFR(0x11, 0x11, static_cast<FRegister>(cc << 2), fs, fd, 0x11), fd, fs, cc);
}
void MipsAssembler::MovtS(FRegister fd, FRegister fs, int cc) {
CHECK(!IsR6());
CHECK(IsUint<3>(cc)) << cc;
- EmitFR(0x11, 0x10, static_cast<FRegister>((cc << 2) | 1), fs, fd, 0x11);
+ DsFsmInstrFffc(EmitFR(0x11, 0x10, static_cast<FRegister>((cc << 2) | 1), fs, fd, 0x11),
+ fd,
+ fs,
+ cc);
}
void MipsAssembler::MovtD(FRegister fd, FRegister fs, int cc) {
CHECK(!IsR6());
CHECK(IsUint<3>(cc)) << cc;
- EmitFR(0x11, 0x11, static_cast<FRegister>((cc << 2) | 1), fs, fd, 0x11);
+ DsFsmInstrFffc(EmitFR(0x11, 0x11, static_cast<FRegister>((cc << 2) | 1), fs, fd, 0x11),
+ fd,
+ fs,
+ cc);
}
void MipsAssembler::SelS(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x10, ft, fs, fd, 0x10);
+ DsFsmInstrFfff(EmitFR(0x11, 0x10, ft, fs, fd, 0x10), fd, fs, ft);
}
void MipsAssembler::SelD(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x11, ft, fs, fd, 0x10);
+ DsFsmInstrFfff(EmitFR(0x11, 0x11, ft, fs, fd, 0x10), fd, fs, ft);
}
void MipsAssembler::ClassS(FRegister fd, FRegister fs) {
CHECK(IsR6());
- EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x1b);
+ DsFsmInstrFff(EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x1b), fd, fs, fs);
}
void MipsAssembler::ClassD(FRegister fd, FRegister fs) {
CHECK(IsR6());
- EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x1b);
+ DsFsmInstrFff(EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x1b), fd, fs, fs);
}
void MipsAssembler::MinS(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x10, ft, fs, fd, 0x1c);
+ DsFsmInstrFff(EmitFR(0x11, 0x10, ft, fs, fd, 0x1c), fd, fs, ft);
}
void MipsAssembler::MinD(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x11, ft, fs, fd, 0x1c);
+ DsFsmInstrFff(EmitFR(0x11, 0x11, ft, fs, fd, 0x1c), fd, fs, ft);
}
void MipsAssembler::MaxS(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x10, ft, fs, fd, 0x1e);
+ DsFsmInstrFff(EmitFR(0x11, 0x10, ft, fs, fd, 0x1e), fd, fs, ft);
}
void MipsAssembler::MaxD(FRegister fd, FRegister fs, FRegister ft) {
CHECK(IsR6());
- EmitFR(0x11, 0x11, ft, fs, fd, 0x1e);
+ DsFsmInstrFff(EmitFR(0x11, 0x11, ft, fs, fd, 0x1e), fd, fs, ft);
}
void MipsAssembler::TruncLS(FRegister fd, FRegister fs) {
- EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x09);
+ DsFsmInstrFff(EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x09), fd, fs, fs);
}
void MipsAssembler::TruncLD(FRegister fd, FRegister fs) {
- EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x09);
+ DsFsmInstrFff(EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x09), fd, fs, fs);
}
void MipsAssembler::TruncWS(FRegister fd, FRegister fs) {
- EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x0D);
+ DsFsmInstrFff(EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x0D), fd, fs, fs);
}
void MipsAssembler::TruncWD(FRegister fd, FRegister fs) {
- EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x0D);
+ DsFsmInstrFff(EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x0D), fd, fs, fs);
}
void MipsAssembler::Cvtsw(FRegister fd, FRegister fs) {
- EmitFR(0x11, 0x14, static_cast<FRegister>(0), fs, fd, 0x20);
+ DsFsmInstrFff(EmitFR(0x11, 0x14, static_cast<FRegister>(0), fs, fd, 0x20), fd, fs, fs);
}
void MipsAssembler::Cvtdw(FRegister fd, FRegister fs) {
- EmitFR(0x11, 0x14, static_cast<FRegister>(0), fs, fd, 0x21);
+ DsFsmInstrFff(EmitFR(0x11, 0x14, static_cast<FRegister>(0), fs, fd, 0x21), fd, fs, fs);
}
void MipsAssembler::Cvtsd(FRegister fd, FRegister fs) {
- EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x20);
+ DsFsmInstrFff(EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x20), fd, fs, fs);
}
void MipsAssembler::Cvtds(FRegister fd, FRegister fs) {
- EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x21);
+ DsFsmInstrFff(EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x21), fd, fs, fs);
}
void MipsAssembler::Cvtsl(FRegister fd, FRegister fs) {
- EmitFR(0x11, 0x15, static_cast<FRegister>(0), fs, fd, 0x20);
+ DsFsmInstrFff(EmitFR(0x11, 0x15, static_cast<FRegister>(0), fs, fd, 0x20), fd, fs, fs);
}
void MipsAssembler::Cvtdl(FRegister fd, FRegister fs) {
- EmitFR(0x11, 0x15, static_cast<FRegister>(0), fs, fd, 0x21);
+ DsFsmInstrFff(EmitFR(0x11, 0x15, static_cast<FRegister>(0), fs, fd, 0x21), fd, fs, fs);
}
void MipsAssembler::FloorWS(FRegister fd, FRegister fs) {
- EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0xf);
+ DsFsmInstrFff(EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0xf), fd, fs, fs);
}
void MipsAssembler::FloorWD(FRegister fd, FRegister fs) {
- EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0xf);
+ DsFsmInstrFff(EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0xf), fd, fs, fs);
}
void MipsAssembler::Mfc1(Register rt, FRegister fs) {
- EmitFR(0x11, 0x00, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0);
+ DsFsmInstrRf(EmitFR(0x11, 0x00, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0),
+ rt,
+ fs);
}
void MipsAssembler::Mtc1(Register rt, FRegister fs) {
- EmitFR(0x11, 0x04, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0);
+ DsFsmInstrFr(EmitFR(0x11, 0x04, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0),
+ fs,
+ rt);
}
void MipsAssembler::Mfhc1(Register rt, FRegister fs) {
- EmitFR(0x11, 0x03, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0);
+ DsFsmInstrRf(EmitFR(0x11, 0x03, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0),
+ rt,
+ fs);
}
void MipsAssembler::Mthc1(Register rt, FRegister fs) {
- EmitFR(0x11, 0x07, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0);
+ DsFsmInstrFr(EmitFR(0x11, 0x07, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0),
+ fs,
+ rt);
}
void MipsAssembler::MoveFromFpuHigh(Register rt, FRegister fs) {
@@ -1330,28 +1575,33 @@
}
void MipsAssembler::Lwc1(FRegister ft, Register rs, uint16_t imm16) {
- EmitI(0x31, rs, static_cast<Register>(ft), imm16);
+ DsFsmInstrFr(EmitI(0x31, rs, static_cast<Register>(ft), imm16), ft, rs);
}
void MipsAssembler::Ldc1(FRegister ft, Register rs, uint16_t imm16) {
- EmitI(0x35, rs, static_cast<Register>(ft), imm16);
+ DsFsmInstrFr(EmitI(0x35, rs, static_cast<Register>(ft), imm16), ft, rs);
}
void MipsAssembler::Swc1(FRegister ft, Register rs, uint16_t imm16) {
- EmitI(0x39, rs, static_cast<Register>(ft), imm16);
+ DsFsmInstrFR(EmitI(0x39, rs, static_cast<Register>(ft), imm16), ft, rs);
}
void MipsAssembler::Sdc1(FRegister ft, Register rs, uint16_t imm16) {
- EmitI(0x3d, rs, static_cast<Register>(ft), imm16);
+ DsFsmInstrFR(EmitI(0x3d, rs, static_cast<Register>(ft), imm16), ft, rs);
}
void MipsAssembler::Break() {
- EmitR(0, static_cast<Register>(0), static_cast<Register>(0),
- static_cast<Register>(0), 0, 0xD);
+ DsFsmInstrNop(EmitR(0, ZERO, ZERO, ZERO, 0, 0xD));
}
void MipsAssembler::Nop() {
- EmitR(0x0, static_cast<Register>(0), static_cast<Register>(0), static_cast<Register>(0), 0, 0x0);
+ DsFsmInstrNop(EmitR(0x0, ZERO, ZERO, ZERO, 0, 0x0));
+}
+
+void MipsAssembler::NopIfNoReordering() {
+ if (!reordering_) {
+ Nop();
+ }
}
void MipsAssembler::Move(Register rd, Register rs) {
@@ -1377,9 +1627,11 @@
}
void MipsAssembler::PopAndReturn(Register rd, Register rt) {
+ bool reordering = SetReorder(false);
Lw(rd, SP, 0);
Jr(rt);
- DecreaseFrameSize(kMipsWordSize);
+ DecreaseFrameSize(kMipsWordSize); // Single instruction in delay slot.
+ SetReorder(reordering);
}
void MipsAssembler::LoadConst32(Register rd, int32_t value) {
@@ -1550,7 +1802,8 @@
target_(target),
lhs_reg_(0),
rhs_reg_(0),
- condition_(kUncond) {
+ condition_(kUncond),
+ delayed_instruction_(kUnfilledDelaySlot) {
InitializeType(is_call, /* is_literal */ false, is_r6);
}
@@ -1565,7 +1818,8 @@
target_(target),
lhs_reg_(lhs_reg),
rhs_reg_(rhs_reg),
- condition_(condition) {
+ condition_(condition),
+ delayed_instruction_(kUnfilledDelaySlot) {
CHECK_NE(condition, kUncond);
switch (condition) {
case kCondLT:
@@ -1617,7 +1871,8 @@
target_(kUnresolved),
lhs_reg_(dest_reg),
rhs_reg_(base_reg),
- condition_(kUncond) {
+ condition_(kUncond),
+ delayed_instruction_(kUnfilledDelaySlot) {
CHECK_NE(dest_reg, ZERO);
if (is_r6) {
CHECK_EQ(base_reg, ZERO);
@@ -1696,12 +1951,38 @@
return old_location_;
}
+uint32_t MipsAssembler::Branch::GetPrecedingInstructionLength(Type type) const {
+ // Short branches with delay slots always consist of two instructions, the branch
+ // and the delay slot, irrespective of whether the delay slot is filled with a
+ // useful instruction or not.
+ // Long composite branches may be one instruction longer than specified in
+ // branch_info_[].length. This happens when an instruction was taken to fill the
+ // short branch delay slot, but the branch eventually becomes long and formally
+ // has no delay slot to fill. That instruction is placed at the beginning of the
+ // long composite branch, which needs to be accounted for in the branch length
+ // and in the location of the offset encoded in the branch.
+ switch (type) {
+ case kLongUncondBranch:
+ case kLongCondBranch:
+ case kLongCall:
+ case kR6LongCondBranch:
+ return (delayed_instruction_ != kUnfilledDelaySlot &&
+ delayed_instruction_ != kUnfillableDelaySlot) ? 1 : 0;
+ default:
+ return 0;
+ }
+}
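+
+// For illustration: if, say, an ADDU was moved into the delay slot of a short branch
+// that is later promoted to a long composite branch, the ADDU is emitted as the
+// branch's preceding instruction, so GetLength() returns branch_info_[type_].length + 1
+// and GetOffsetLocation() shifts by one word accordingly.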
+
+uint32_t MipsAssembler::Branch::GetPrecedingInstructionSize(Type type) const {
+ return GetPrecedingInstructionLength(type) * sizeof(uint32_t);
+}
+
uint32_t MipsAssembler::Branch::GetLength() const {
- return branch_info_[type_].length;
+ return GetPrecedingInstructionLength(type_) + branch_info_[type_].length;
}
uint32_t MipsAssembler::Branch::GetOldLength() const {
- return branch_info_[old_type_].length;
+ return GetPrecedingInstructionLength(old_type_) + branch_info_[old_type_].length;
}
uint32_t MipsAssembler::Branch::GetSize() const {
@@ -1883,7 +2164,8 @@
}
uint32_t MipsAssembler::Branch::GetOffsetLocation() const {
- return location_ + branch_info_[type_].instr_offset * sizeof(uint32_t);
+ return location_ + GetPrecedingInstructionSize(type_) +
+ branch_info_[type_].instr_offset * sizeof(uint32_t);
}
uint32_t MipsAssembler::GetBranchOrPcRelBaseForEncoding(const MipsAssembler::Branch* branch) const {
@@ -1925,6 +2207,9 @@
CHECK(!label->IsBound());
uint32_t bound_pc = buffer_.Size();
+ // Make the delay slot FSM aware of the new label.
+ DsFsmLabel();
+
// Walk the list of branches referring to and preceding this label.
// Store the previously unknown target addresses in them.
while (label->IsLinked()) {
@@ -1997,11 +2282,15 @@
void MipsAssembler::FinalizeLabeledBranch(MipsLabel* label) {
uint32_t length = branches_.back().GetLength();
+ // Commit the last branch target label (if any).
+ DsFsmCommitLabel();
if (!label->IsBound()) {
// Branch forward (to a following label), distance is unknown.
// The first branch forward will contain 0, serving as the terminator of
// the list of forward-reaching branches.
Emit(label->position_);
+ // Nothing for the delay slot (yet).
+ DsFsmInstrNop(0);
length--;
// Now make the label object point to this branch
// (this forms a linked list of branches preceding this label).
@@ -2014,9 +2303,139 @@
}
}
+bool MipsAssembler::Branch::CanHaveDelayedInstruction(const DelaySlot& delay_slot) const {
+ if (delay_slot.instruction_ == 0) {
+ // NOP or no instruction for the delay slot.
+ return false;
+ }
+ switch (type_) {
+ // R2 unconditional branches.
+ case kUncondBranch:
+ case kLongUncondBranch:
+ // There are no register interdependencies.
+ return true;
+
+ // R2 calls.
+ case kCall:
+ case kLongCall:
+ // Instructions depending on or modifying RA should not be moved into delay slots
+ // of branches modifying RA.
+ return ((delay_slot.gpr_ins_mask_ | delay_slot.gpr_outs_mask_) & (1u << RA)) == 0;
+
+ // R2 conditional branches.
+ case kCondBranch:
+ case kLongCondBranch:
+ switch (condition_) {
+ // Branches with one GPR source.
+ case kCondLTZ:
+ case kCondGEZ:
+ case kCondLEZ:
+ case kCondGTZ:
+ case kCondEQZ:
+ case kCondNEZ:
+ return (delay_slot.gpr_outs_mask_ & (1u << lhs_reg_)) == 0;
+
+ // Branches with two GPR sources.
+ case kCondEQ:
+ case kCondNE:
+ return (delay_slot.gpr_outs_mask_ & ((1u << lhs_reg_) | (1u << rhs_reg_))) == 0;
+
+ // Branches with one FPU condition code source.
+ case kCondF:
+ case kCondT:
+ return (delay_slot.cc_outs_mask_ & (1u << lhs_reg_)) == 0;
+
+ default:
+ // We don't support synthetic R2 branches (preceded with slt[u]) at this level
+ // (R2 doesn't have branches to compare 2 registers using <, <=, >=, >).
+ LOG(FATAL) << "Unexpected branch condition " << condition_;
+ UNREACHABLE();
+ }
+
+ // R6 unconditional branches.
+ case kR6UncondBranch:
+ case kR6LongUncondBranch:
+ // R6 calls.
+ case kR6Call:
+ case kR6LongCall:
+ // There are no delay slots.
+ return false;
+
+ // R6 conditional branches.
+ case kR6CondBranch:
+ case kR6LongCondBranch:
+ switch (condition_) {
+ // Branches with one FPU register source.
+ case kCondF:
+ case kCondT:
+ return (delay_slot.fpr_outs_mask_ & (1u << lhs_reg_)) == 0;
+ // Others have a forbidden slot instead of a delay slot.
+ default:
+ return false;
+ }
+
+ // Literals.
+ default:
+ LOG(FATAL) << "Unexpected branch type " << type_;
+ UNREACHABLE();
+ }
+}
+
+uint32_t MipsAssembler::Branch::GetDelayedInstruction() const {
+ return delayed_instruction_;
+}
+
+void MipsAssembler::Branch::SetDelayedInstruction(uint32_t instruction) {
+ CHECK_NE(instruction, kUnfilledDelaySlot);
+ CHECK_EQ(delayed_instruction_, kUnfilledDelaySlot);
+ delayed_instruction_ = instruction;
+}
+
+void MipsAssembler::Branch::DecrementLocations() {
+ // We first create a branch object, which gets its type and locations initialized,
+ // and then we check if the branch can actually have the preceding instruction moved
+ // into its delay slot. If it can, the branch locations need to be decremented.
+ //
+ // We could make the check before creating the branch object and avoid the location
+ // adjustment, but the check is cleaner when performed on an initialized branch
+ // object.
+ //
+ // If the branch is backwards (to a previously bound label), reducing the locations
+ // cannot cause a short branch to exceed its offset range because the offset only
+ // shrinks. Nor is this a problem for a long backward branch.
+ //
+ // If the branch is forward (not linked to any label yet), reducing the locations
+ // is harmless. The branch will be promoted to long if needed when the target is known.
+ CHECK_EQ(location_, old_location_);
+ CHECK_GE(old_location_, sizeof(uint32_t));
+ old_location_ -= sizeof(uint32_t);
+ location_ = old_location_;
+}
+
+void MipsAssembler::MoveInstructionToDelaySlot(Branch& branch) {
+ if (branch.CanHaveDelayedInstruction(delay_slot_)) {
+ // The last instruction cannot be used in a different delay slot, so do not
+ // commit the label before it (if any).
+ DsFsmDropLabel();
+ // Remove the last emitted instruction.
+ size_t size = buffer_.Size();
+ CHECK_GE(size, sizeof(uint32_t));
+ size -= sizeof(uint32_t);
+ CHECK_EQ(buffer_.Load<uint32_t>(size), delay_slot_.instruction_);
+ buffer_.Resize(size);
+ // Attach it to the branch and adjust the branch locations.
+ branch.DecrementLocations();
+ branch.SetDelayedInstruction(delay_slot_.instruction_);
+ } else if (!reordering_ && branch.GetType() == Branch::kUncondBranch) {
+ // If reordering is disabled, prevent absorption of the target instruction.
+ branch.SetDelayedInstruction(Branch::kUnfillableDelaySlot);
+ }
+}
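+
+// For illustration (cf. the Reordering and SetReorder tests in assembler_mips_test.cc):
+// with reordering enabled,
+//   __ Addu(T0, T1, T2);
+//   __ B(&label);
+// removes the ADDU from the buffer, attaches it to the branch and ultimately emits
+//   b    <label>
+//   addu $t0, $t1, $t2
+// i.e. the preceding instruction ends up in the branch delay slot.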
+
void MipsAssembler::Buncond(MipsLabel* label) {
uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
branches_.emplace_back(IsR6(), buffer_.Size(), target, /* is_call */ false);
+ MoveInstructionToDelaySlot(branches_.back());
FinalizeLabeledBranch(label);
}
@@ -2027,12 +2446,14 @@
}
uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
branches_.emplace_back(IsR6(), buffer_.Size(), target, condition, lhs, rhs);
+ MoveInstructionToDelaySlot(branches_.back());
FinalizeLabeledBranch(label);
}
void MipsAssembler::Call(MipsLabel* label) {
uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
branches_.emplace_back(IsR6(), buffer_.Size(), target, /* is_call */ true);
+ MoveInstructionToDelaySlot(branches_.back());
FinalizeLabeledBranch(label);
}
@@ -2104,6 +2525,7 @@
uint32_t end = old_size;
for (size_t i = branch_count; i > 0; ) {
Branch& branch = branches_[--i];
+ CHECK_GE(end, branch.GetOldEndLocation());
uint32_t size = end - branch.GetOldEndLocation();
buffer_.Move(branch.GetEndLocation(), branch.GetOldEndLocation(), size);
end = branch.GetOldLocation();
@@ -2148,26 +2570,53 @@
BranchCondition condition = branch->GetCondition();
Register lhs = branch->GetLeftRegister();
Register rhs = branch->GetRightRegister();
+ uint32_t delayed_instruction = branch->GetDelayedInstruction();
switch (branch->GetType()) {
// R2 short branches.
case Branch::kUncondBranch:
+ if (delayed_instruction == Branch::kUnfillableDelaySlot) {
+ // The branch was created while reordering was disabled; do not absorb the target
+ // instruction.
+ delayed_instruction = 0; // NOP.
+ } else if (delayed_instruction == Branch::kUnfilledDelaySlot) {
+ // Try to absorb the target instruction into the delay slot.
+ delayed_instruction = 0; // NOP.
+ // Incrementing the signed 16-bit offset past the target instruction must not
+ // cause overflow into the negative subrange; check for the maximum offset.
+ if (offset != 0x7FFF) {
+ uint32_t target = branch->GetTarget();
+ if (std::binary_search(ds_fsm_target_pcs_.begin(), ds_fsm_target_pcs_.end(), target)) {
+ delayed_instruction = buffer_.Load<uint32_t>(target);
+ offset++;
+ }
+ }
+ }
CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
B(offset);
- Nop(); // TODO: improve by filling the delay slot.
+ Emit(delayed_instruction);
break;
case Branch::kCondBranch:
+ DCHECK_NE(delayed_instruction, Branch::kUnfillableDelaySlot);
+ if (delayed_instruction == Branch::kUnfilledDelaySlot) {
+ delayed_instruction = 0; // NOP.
+ }
CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
EmitBcondR2(condition, lhs, rhs, offset);
- Nop(); // TODO: improve by filling the delay slot.
+ Emit(delayed_instruction);
break;
case Branch::kCall:
+ DCHECK_NE(delayed_instruction, Branch::kUnfillableDelaySlot);
+ if (delayed_instruction == Branch::kUnfilledDelaySlot) {
+ delayed_instruction = 0; // NOP.
+ }
CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
Bal(offset);
- Nop(); // TODO: improve by filling the delay slot.
+ Emit(delayed_instruction);
break;
// R2 near literal.
case Branch::kLiteral:
+ DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
Lw(lhs, rhs, offset);
break;
@@ -2192,6 +2641,12 @@
// For now simply use the stack for RA. This should be OK since for the
// vast majority of code a short PC-relative branch is sufficient.
// TODO: can this be improved?
+ // TODO: consider generation of a shorter sequence when we know that RA
+ // is explicitly preserved by the method entry/exit code.
+ if (delayed_instruction != Branch::kUnfilledDelaySlot &&
+ delayed_instruction != Branch::kUnfillableDelaySlot) {
+ Emit(delayed_instruction);
+ }
Push(RA);
Nal();
CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
@@ -2204,6 +2659,10 @@
break;
case Branch::kLongCondBranch:
// The comment on case 'Branch::kLongUncondBranch' applies here as well.
+ DCHECK_NE(delayed_instruction, Branch::kUnfillableDelaySlot);
+ if (delayed_instruction != Branch::kUnfilledDelaySlot) {
+ Emit(delayed_instruction);
+ }
// Note: the opposite condition branch encodes 8 as the distance, which is equal to the
// number of instructions skipped:
// (PUSH(IncreaseFrameSize(ADDIU) + SW) + NAL + LUI + ORI + ADDU + LW + JR).
@@ -2219,6 +2678,10 @@
DecreaseFrameSize(kMipsWordSize);
break;
case Branch::kLongCall:
+ DCHECK_NE(delayed_instruction, Branch::kUnfillableDelaySlot);
+ if (delayed_instruction != Branch::kUnfilledDelaySlot) {
+ Emit(delayed_instruction);
+ }
Nal();
CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
Lui(AT, High16Bits(offset));
@@ -2230,6 +2693,7 @@
// R2 far literal.
case Branch::kFarLiteral:
+ DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
offset += (offset & 0x8000) << 1; // Account for sign extension in lw.
CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
Lui(AT, High16Bits(offset));
@@ -2239,33 +2703,48 @@
// R6 short branches.
case Branch::kR6UncondBranch:
+ DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
Bc(offset);
break;
case Branch::kR6CondBranch:
CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
EmitBcondR6(condition, lhs, rhs, offset);
- Nop(); // TODO: improve by filling the forbidden/delay slot.
+ DCHECK_NE(delayed_instruction, Branch::kUnfillableDelaySlot);
+ if (delayed_instruction != Branch::kUnfilledDelaySlot) {
+ Emit(delayed_instruction);
+ } else {
+ // TODO: improve by filling the forbidden slot (IFF this is
+ // a forbidden and not a delay slot).
+ Nop();
+ }
break;
case Branch::kR6Call:
+ DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
Balc(offset);
break;
// R6 near literal.
case Branch::kR6Literal:
+ DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
Lwpc(lhs, offset);
break;
// R6 long branches.
case Branch::kR6LongUncondBranch:
+ DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
offset += (offset & 0x8000) << 1; // Account for sign extension in jic.
CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
Auipc(AT, High16Bits(offset));
Jic(AT, Low16Bits(offset));
break;
case Branch::kR6LongCondBranch:
+ DCHECK_NE(delayed_instruction, Branch::kUnfillableDelaySlot);
+ if (delayed_instruction != Branch::kUnfilledDelaySlot) {
+ Emit(delayed_instruction);
+ }
EmitBcondR6(Branch::OppositeCondition(condition), lhs, rhs, 2);
offset += (offset & 0x8000) << 1; // Account for sign extension in jic.
CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
@@ -2273,6 +2752,7 @@
Jic(AT, Low16Bits(offset));
break;
case Branch::kR6LongCall:
+ DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
offset += (offset & 0x8000) << 1; // Account for sign extension in jialc.
CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
Auipc(AT, High16Bits(offset));
@@ -2281,6 +2761,7 @@
// R6 far literal.
case Branch::kR6FarLiteral:
+ DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
offset += (offset & 0x8000) << 1; // Account for sign extension in lw.
CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
Auipc(AT, High16Bits(offset));
@@ -2331,12 +2812,60 @@
Bcond(label, kCondGTZ, rt);
}
+bool MipsAssembler::CanExchangeWithSlt(Register rs, Register rt) const {
+ // If the instruction modifies AT, `rs` or `rt`, it can't be exchanged with the slt[u]
+ // instruction because either slt[u] depends on `rs` or `rt`, or the following
+ // conditional branch depends on AT, which is set by slt[u].
+ // Likewise, if the instruction depends on AT, it can't be exchanged with slt[u]
+ // because slt[u] changes AT.
+ return (delay_slot_.instruction_ != 0 &&
+ (delay_slot_.gpr_outs_mask_ & ((1u << AT) | (1u << rs) | (1u << rt))) == 0 &&
+ (delay_slot_.gpr_ins_mask_ & (1u << AT)) == 0);
+}
+
+void MipsAssembler::ExchangeWithSlt(const DelaySlot& forwarded_slot) {
+ // Exchange the last two instructions in the assembler buffer.
+ size_t size = buffer_.Size();
+ CHECK_GE(size, 2 * sizeof(uint32_t));
+ size_t pos1 = size - 2 * sizeof(uint32_t);
+ size_t pos2 = size - sizeof(uint32_t);
+ uint32_t instr1 = buffer_.Load<uint32_t>(pos1);
+ uint32_t instr2 = buffer_.Load<uint32_t>(pos2);
+ CHECK_EQ(instr1, forwarded_slot.instruction_);
+ CHECK_EQ(instr2, delay_slot_.instruction_);
+ buffer_.Store<uint32_t>(pos1, instr2);
+ buffer_.Store<uint32_t>(pos2, instr1);
+ // Set the current delay slot information to that of the last instruction
+ // in the buffer.
+ delay_slot_ = forwarded_slot;
+}
+
+void MipsAssembler::GenerateSltForCondBranch(bool unsigned_slt, Register rs, Register rt) {
+ // If possible, exchange the slt[u] instruction with the preceding instruction,
+ // so it can fill the delay slot.
+ DelaySlot forwarded_slot = delay_slot_;
+ bool exchange = CanExchangeWithSlt(rs, rt);
+ if (exchange) {
+ // The last instruction cannot be used in a different delay slot, so do not
+ // commit the label before it (if any).
+ DsFsmDropLabel();
+ }
+ if (unsigned_slt) {
+ Sltu(AT, rs, rt);
+ } else {
+ Slt(AT, rs, rt);
+ }
+ if (exchange) {
+ ExchangeWithSlt(forwarded_slot);
+ }
+}
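+
+// For illustration (cf. the Reordering test in assembler_mips_test.cc): with reordering
+// enabled,
+//   __ Xor(T2, T0, T1);
+//   __ Bge(T1, T0, &label);
+// emits
+//   slt  $at, $t1, $t0
+//   beq  $zero, $at, <label>
+//   xor  $t2, $t0, $t1
+// because the XOR neither reads AT nor writes AT, T1 or T0, so it is exchanged with
+// the slt and then fills the branch delay slot.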
+
void MipsAssembler::Blt(Register rs, Register rt, MipsLabel* label) {
if (IsR6()) {
Bcond(label, kCondLT, rs, rt);
} else if (!Branch::IsNop(kCondLT, rs, rt)) {
// Synthesize the instruction (not available on R2).
- Slt(AT, rs, rt);
+ GenerateSltForCondBranch(/* unsigned_slt */ false, rs, rt);
Bnez(AT, label);
}
}
@@ -2348,7 +2877,7 @@
B(label);
} else {
// Synthesize the instruction (not available on R2).
- Slt(AT, rs, rt);
+ GenerateSltForCondBranch(/* unsigned_slt */ false, rs, rt);
Beqz(AT, label);
}
}
@@ -2358,7 +2887,7 @@
Bcond(label, kCondLTU, rs, rt);
} else if (!Branch::IsNop(kCondLTU, rs, rt)) {
// Synthesize the instruction (not available on R2).
- Sltu(AT, rs, rt);
+ GenerateSltForCondBranch(/* unsigned_slt */ true, rs, rt);
Bnez(AT, label);
}
}
@@ -2370,7 +2899,7 @@
B(label);
} else {
// Synthesize the instruction (not available on R2).
- Sltu(AT, rs, rt);
+ GenerateSltForCondBranch(/* unsigned_slt */ true, rs, rt);
Beqz(AT, label);
}
}
@@ -2613,12 +3142,22 @@
LoadFromOffset(kLoadWord, RA, SP, stack_offset);
cfi_.Restore(DWARFReg(RA));
- // Decrease frame to required size.
- DecreaseFrameSize(frame_size);
-
- // Then jump to the return address.
- Jr(RA);
- Nop();
+ // Adjust the stack pointer in the delay slot if doing so doesn't break CFI.
+ bool exchange = IsInt<16>(static_cast<int32_t>(frame_size));
+ bool reordering = SetReorder(false);
+ if (exchange) {
+ // Jump to the return address.
+ Jr(RA);
+ // Decrease frame to required size.
+ DecreaseFrameSize(frame_size); // Single instruction in delay slot.
+ } else {
+ // Decrease frame to required size.
+ DecreaseFrameSize(frame_size);
+ // Jump to the return address.
+ Jr(RA);
+ Nop(); // In delay slot.
+ }
+ SetReorder(reordering);
// The CFI should be restored for any code that follows the exit block.
cfi_.RestoreState();
@@ -2963,7 +3502,7 @@
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
base.AsCoreRegister(), offset.Int32Value());
Jalr(scratch.AsCoreRegister());
- Nop();
+ NopIfNoReordering();
// TODO: place reference map on call.
}
@@ -2975,7 +3514,7 @@
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
scratch.AsCoreRegister(), offset.Int32Value());
Jalr(scratch.AsCoreRegister());
- Nop();
+ NopIfNoReordering();
// TODO: place reference map on call.
}
@@ -2998,9 +3537,6 @@
exception_blocks_.emplace_back(scratch, stack_adjust);
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
S1, Thread::ExceptionOffset<kMipsPointerSize>().Int32Value());
- // TODO: on MIPS32R6 prefer Bnezc(scratch.AsCoreRegister(), slow.Entry());
- // as the NAL instruction (occurring in long R2 branches) may become deprecated.
- // For now use common for R2 and R6 instructions as this code must execute on both.
Bnez(scratch.AsCoreRegister(), exception_blocks_.back().Entry());
}
@@ -3017,7 +3553,7 @@
LoadFromOffset(kLoadWord, T9, S1,
QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pDeliverException).Int32Value());
Jr(T9);
- Nop();
+ NopIfNoReordering();
// Call never returns.
Break();
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 434ca67..d50c439 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -154,6 +154,9 @@
: Assembler(arena),
overwriting_(false),
overwrite_location_(0),
+ reordering_(true),
+ ds_fsm_state_(kExpectingLabel),
+ ds_fsm_target_pc_(0),
literals_(arena->Adapter(kArenaAllocAssembler)),
last_position_adjustment_(0),
last_old_position_(0),
@@ -163,6 +166,7 @@
}
size_t CodeSize() const OVERRIDE { return Assembler::CodeSize(); }
+ size_t CodePosition() OVERRIDE;
DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }
virtual ~MipsAssembler() {
@@ -256,6 +260,11 @@
void Slti(Register rt, Register rs, uint16_t imm16);
void Sltiu(Register rt, Register rs, uint16_t imm16);
+ // Branches and jumps to immediate offsets/addresses do not take care of their
+ // delay/forbidden slots and generally should not be used directly. This applies
+ // to the following R2 and R6 branch/jump instructions with imm16, imm21, addr26
+ // offsets/addresses.
+ // Use branches/jumps to labels instead.
void B(uint16_t imm16);
void Bal(uint16_t imm16);
void Beq(Register rs, Register rt, uint16_t imm16);
@@ -272,9 +281,13 @@
void Bc1t(int cc, uint16_t imm16); // R2
void J(uint32_t addr26);
void Jal(uint32_t addr26);
+ // Jalr() and Jr() fill their delay slots when reordering is enabled.
+ // When reordering is disabled, the delay slots must be filled manually.
+ // You may use NopIfNoReordering() to fill them when reordering is disabled.
void Jalr(Register rd, Register rs);
void Jalr(Register rs);
void Jr(Register rs);
+ // Nal() does not fill its delay slot. It must be filled manually.
void Nal();
void Auipc(Register rs, uint16_t imm16); // R6
void Addiupc(Register rs, uint32_t imm19); // R6
@@ -403,6 +416,7 @@
void Break();
void Nop();
+ void NopIfNoReordering();
void Move(Register rd, Register rs);
void Clear(Register rd);
void Not(Register rd, Register rs);
@@ -414,7 +428,8 @@
void LoadSConst32(FRegister r, int32_t value, Register temp);
void Addiu32(Register rt, Register rs, int32_t value, Register rtmp = AT);
- // These will generate R2 branches or R6 branches as appropriate.
+ // These will generate R2 branches or R6 branches as appropriate and take care of
+ // the delay/forbidden slots.
void Bind(MipsLabel* label);
void B(MipsLabel* label);
void Bal(MipsLabel* label);
@@ -868,7 +883,51 @@
};
friend std::ostream& operator<<(std::ostream& os, const BranchCondition& rhs);
+ // Enables or disables instruction reordering (IOW, automatic filling of delay slots)
+ // similarly to ".set reorder" / ".set noreorder" in traditional MIPS assembly.
+ // Returns the previous state, which may be useful for temporarily enabling/disabling
+ // reordering.
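+ //
+ // Typical usage when a code sequence must manage its own delay slot (mirroring
+ // PopAndReturn() in assembler_mips.cc):
+ //   bool reordering = SetReorder(false);
+ //   Jr(rt);
+ //   DecreaseFrameSize(kMipsWordSize);  // Single instruction in delay slot.
+ //   SetReorder(reordering);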
+ bool SetReorder(bool enable);
+
private:
+ // Description of the last instruction in terms of input and output registers.
+ // Used to make the decision of moving the instruction into a delay slot.
+ struct DelaySlot {
+ DelaySlot();
+ // Encoded instruction that may be used to fill the delay slot or 0
+ // (0 conveniently represents NOP).
+ uint32_t instruction_;
+ // Mask of output GPRs for the instruction.
+ uint32_t gpr_outs_mask_;
+ // Mask of input GPRs for the instruction.
+ uint32_t gpr_ins_mask_;
+ // Mask of output FPRs for the instruction.
+ uint32_t fpr_outs_mask_;
+ // Mask of input FPRs for the instruction.
+ uint32_t fpr_ins_mask_;
+ // Mask of output FPU condition code flags for the instruction.
+ uint32_t cc_outs_mask_;
+ // Mask of input FPU condition code flags for the instruction.
+ uint32_t cc_ins_mask_;
+ // Branches never operate on the LO and HI registers, hence there's
+ // no mask for LO and HI.
+ };
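+
+ // For example, for a conditional branch with one GPR source, CanHaveDelayedInstruction()
+ // accepts a candidate only if (gpr_outs_mask_ & (1u << lhs_reg_)) == 0, i.e. the
+ // candidate must not clobber the register the branch reads.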
+
+ // Delay slot finite state machine's (DS FSM's) state. The FSM state is updated
+ // upon every new instruction and label generated. The FSM detects instructions
+ // suitable for delay slots and immediately preceded with labels. These are target
+ // instructions for branches. If an unconditional R2 branch does not get its delay
+ // slot filled with the immediately preceding instruction, it may instead get the
+ // slot filled with the target instruction (the branch will need its offset
+ // incremented past the target instruction). We call this "absorption". The FSM
+ // records PCs of the target instructions suitable for this optimization.
+ enum DsFsmState {
+ kExpectingLabel,
+ kExpectingInstruction,
+ kExpectingCommit
+ };
+ friend std::ostream& operator<<(std::ostream& os, const DsFsmState& rhs);
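+
+ // Absorption example (cf. the AbsorbTargetInstruction test in assembler_mips_test.cc):
+ //   __ B(&label);
+ //   __ Bind(&label);
+ //   __ Addu(T0, T1, T2);
+ // assembles to
+ //   b    1f
+ //   addu $t0, $t1, $t2   // Copy of the target instruction in the delay slot.
+ //   addu $t0, $t1, $t2
+ //   1:
+ // with the branch offset incremented past the original target instruction.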
+
class Branch {
public:
enum Type {
@@ -910,6 +969,17 @@
static constexpr uint32_t kUnresolved = 0xffffffff; // Unresolved target_
static constexpr int32_t kMaxBranchLength = 32;
static constexpr int32_t kMaxBranchSize = kMaxBranchLength * sizeof(uint32_t);
+ // The following two instruction encodings can never legally occur in branch delay
+ // slots and are used as markers.
+ //
+ // kUnfilledDelaySlot means that the branch may use either the preceding or the target
+ // instruction to fill its delay slot (the latter is only possible with unconditional
+ // R2 branches and is termed here as "absorption").
+ static constexpr uint32_t kUnfilledDelaySlot = 0x10000000; // beq zero, zero, 0.
+ // kUnfillableDelaySlot means that the branch cannot use an instruction (other than NOP)
+ // to fill its delay slot. This is only used for unconditional R2 branches to prevent
+ // absorption of the target instruction when reordering is disabled.
+ static constexpr uint32_t kUnfillableDelaySlot = 0x13FF0000; // beq ra, ra, 0.
struct BranchInfo {
// Branch length as a number of 4-byte-long instructions.
@@ -958,6 +1028,8 @@
uint32_t GetTarget() const;
uint32_t GetLocation() const;
uint32_t GetOldLocation() const;
+ uint32_t GetPrecedingInstructionLength(Type type) const;
+ uint32_t GetPrecedingInstructionSize(Type type) const;
uint32_t GetLength() const;
uint32_t GetOldLength() const;
uint32_t GetSize() const;
@@ -967,6 +1039,12 @@
bool IsLong() const;
bool IsResolved() const;
+ // Various helpers for branch delay slot management.
+ bool CanHaveDelayedInstruction(const DelaySlot& delay_slot) const;
+ void SetDelayedInstruction(uint32_t instruction);
+ uint32_t GetDelayedInstruction() const;
+ void DecrementLocations();
+
// Returns the bit size of the signed offset that the branch instruction can handle.
OffsetBits GetOffsetSize() const;
@@ -1031,27 +1109,34 @@
// Helper for the above.
void InitShortOrLong(OffsetBits ofs_size, Type short_type, Type long_type);
- uint32_t old_location_; // Offset into assembler buffer in bytes.
- uint32_t location_; // Offset into assembler buffer in bytes.
- uint32_t target_; // Offset into assembler buffer in bytes.
+ uint32_t old_location_; // Offset into assembler buffer in bytes.
+ uint32_t location_; // Offset into assembler buffer in bytes.
+ uint32_t target_; // Offset into assembler buffer in bytes.
- uint32_t lhs_reg_; // Left-hand side register in conditional branches or
- // indirect call register.
- uint32_t rhs_reg_; // Right-hand side register in conditional branches.
- BranchCondition condition_; // Condition for conditional branches.
+ uint32_t lhs_reg_; // Left-hand side register in conditional branches or
+ // FPU condition code. Destination register in literals.
+ uint32_t rhs_reg_; // Right-hand side register in conditional branches.
+ // Base register in literals (ZERO on R6).
+ BranchCondition condition_; // Condition for conditional branches.
- Type type_; // Current type of the branch.
- Type old_type_; // Initial type of the branch.
+ Type type_; // Current type of the branch.
+ Type old_type_; // Initial type of the branch.
+
+ uint32_t delayed_instruction_; // Encoded instruction for the delay slot or
+ // kUnfilledDelaySlot if none but fillable or
+ // kUnfillableDelaySlot if none and unfillable
+ // (the latter is only used for unconditional R2
+ // branches).
};
friend std::ostream& operator<<(std::ostream& os, const Branch::Type& rhs);
friend std::ostream& operator<<(std::ostream& os, const Branch::OffsetBits& rhs);
- void EmitR(int opcode, Register rs, Register rt, Register rd, int shamt, int funct);
- void EmitI(int opcode, Register rs, Register rt, uint16_t imm);
- void EmitI21(int opcode, Register rs, uint32_t imm21);
- void EmitI26(int opcode, uint32_t imm26);
- void EmitFR(int opcode, int fmt, FRegister ft, FRegister fs, FRegister fd, int funct);
- void EmitFI(int opcode, int fmt, FRegister rt, uint16_t imm);
+ uint32_t EmitR(int opcode, Register rs, Register rt, Register rd, int shamt, int funct);
+ uint32_t EmitI(int opcode, Register rs, Register rt, uint16_t imm);
+ uint32_t EmitI21(int opcode, Register rs, uint32_t imm21);
+ uint32_t EmitI26(int opcode, uint32_t imm26);
+ uint32_t EmitFR(int opcode, int fmt, FRegister ft, FRegister fs, FRegister fd, int funct);
+ uint32_t EmitFI(int opcode, int fmt, FRegister rt, uint16_t imm);
void EmitBcondR2(BranchCondition cond, Register rs, Register rt, uint16_t imm16);
void EmitBcondR6(BranchCondition cond, Register rs, Register rt, uint32_t imm16_21);
@@ -1060,6 +1145,33 @@
void Call(MipsLabel* label);
void FinalizeLabeledBranch(MipsLabel* label);
+ // Various helpers for branch delay slot management.
+ void DsFsmInstr(uint32_t instruction,
+ uint32_t gpr_outs_mask,
+ uint32_t gpr_ins_mask,
+ uint32_t fpr_outs_mask,
+ uint32_t fpr_ins_mask,
+ uint32_t cc_outs_mask,
+ uint32_t cc_ins_mask);
+ void DsFsmInstrNop(uint32_t instruction);
+ void DsFsmInstrRrr(uint32_t instruction, Register out, Register in1, Register in2);
+ void DsFsmInstrRrrr(uint32_t instruction, Register in1_out, Register in2, Register in3);
+ void DsFsmInstrFff(uint32_t instruction, FRegister out, FRegister in1, FRegister in2);
+ void DsFsmInstrFfff(uint32_t instruction, FRegister in1_out, FRegister in2, FRegister in3);
+ void DsFsmInstrRf(uint32_t instruction, Register out, FRegister in);
+ void DsFsmInstrFr(uint32_t instruction, FRegister out, Register in);
+ void DsFsmInstrFR(uint32_t instruction, FRegister in1, Register in2);
+ void DsFsmInstrCff(uint32_t instruction, int cc_out, FRegister in1, FRegister in2);
+ void DsFsmInstrRrrc(uint32_t instruction, Register in1_out, Register in2, int cc_in);
+ void DsFsmInstrFffc(uint32_t instruction, FRegister in1_out, FRegister in2, int cc_in);
+ void DsFsmLabel();
+ void DsFsmCommitLabel();
+ void DsFsmDropLabel();
+ void MoveInstructionToDelaySlot(Branch& branch);
+ bool CanExchangeWithSlt(Register rs, Register rt) const;
+ void ExchangeWithSlt(const DelaySlot& forwarded_slot);
+ void GenerateSltForCondBranch(bool unsigned_slt, Register rs, Register rt);
+
Branch* GetBranch(uint32_t branch_id);
const Branch* GetBranch(uint32_t branch_id) const;
uint32_t GetBranchLocationOrPcRelBase(const MipsAssembler::Branch* branch) const;
@@ -1100,6 +1212,17 @@
// The current overwrite location.
uint32_t overwrite_location_;
+ // Whether instruction reordering (IOW, automatic filling of delay slots) is enabled.
+ bool reordering_;
+ // Information about the last instruction that may be used to fill a branch delay slot.
+ DelaySlot delay_slot_;
+ // Delay slot FSM state.
+ DsFsmState ds_fsm_state_;
+ // PC of the current labeled target instruction.
+ uint32_t ds_fsm_target_pc_;
+ // PCs of labeled target instructions.
+ std::vector<uint32_t> ds_fsm_target_pcs_;
+
// Use std::deque<> for literal labels to allow insertions at the end
// without invalidating pointers and references to existing elements.
ArenaDeque<Literal> literals_;
@@ -1109,7 +1232,7 @@
// that PC (from NAL) points to.
MipsLabel pc_rel_base_label_;
- // Data for AdjustedPosition(), see the description there.
+ // Data for GetAdjustedPosition(), see the description there.
uint32_t last_position_adjustment_;
uint32_t last_old_position_;
uint32_t last_branch_id_;
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
index 49ef272..fabb096 100644
--- a/compiler/utils/mips/assembler_mips32r6_test.cc
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -673,6 +673,144 @@
// BRANCHES //
//////////////
+TEST_F(AssemblerMIPS32r6Test, ImpossibleReordering) {
+ mips::MipsLabel label;
+ __ SetReorder(true);
+ __ Bind(&label);
+
+ __ CmpLtD(mips::F0, mips::F2, mips::F4);
+ __ Bc1nez(mips::F0, &label); // F0 dependency.
+
+ __ MulD(mips::F10, mips::F2, mips::F4);
+ __ Bc1eqz(mips::F10, &label); // F10 dependency.
+
+ std::string expected =
+ ".set noreorder\n"
+ "1:\n"
+
+ "cmp.lt.d $f0, $f2, $f4\n"
+ "bc1nez $f0, 1b\n"
+ "nop\n"
+
+ "mul.d $f10, $f2, $f4\n"
+ "bc1eqz $f10, 1b\n"
+ "nop\n";
+ DriverStr(expected, "ImpossibleReordering");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Reordering) {
+ mips::MipsLabel label;
+ __ SetReorder(true);
+ __ Bind(&label);
+
+ __ CmpLtD(mips::F0, mips::F2, mips::F4);
+ __ Bc1nez(mips::F2, &label);
+
+ __ MulD(mips::F0, mips::F2, mips::F4);
+ __ Bc1eqz(mips::F4, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "1:\n"
+
+ "bc1nez $f2, 1b\n"
+ "cmp.lt.d $f0, $f2, $f4\n"
+
+ "bc1eqz $f4, 1b\n"
+ "mul.d $f0, $f2, $f4\n";
+ DriverStr(expected, "Reordering");
+}
+
+TEST_F(AssemblerMIPS32r6Test, SetReorder) {
+ mips::MipsLabel label1, label2, label3, label4;
+
+ __ SetReorder(true);
+ __ Bind(&label1);
+ __ Addu(mips::T0, mips::T1, mips::T2);
+ __ Bc1nez(mips::F0, &label1);
+
+ __ SetReorder(false);
+ __ Bind(&label2);
+ __ Addu(mips::T0, mips::T1, mips::T2);
+ __ Bc1nez(mips::F0, &label2);
+
+ __ SetReorder(true);
+ __ Bind(&label3);
+ __ Addu(mips::T0, mips::T1, mips::T2);
+ __ Bc1eqz(mips::F0, &label3);
+
+ __ SetReorder(false);
+ __ Bind(&label4);
+ __ Addu(mips::T0, mips::T1, mips::T2);
+ __ Bc1eqz(mips::F0, &label4);
+
+ std::string expected =
+ ".set noreorder\n"
+ "1:\n"
+ "bc1nez $f0, 1b\n"
+ "addu $t0, $t1, $t2\n"
+
+ "2:\n"
+ "addu $t0, $t1, $t2\n"
+ "bc1nez $f0, 2b\n"
+ "nop\n"
+
+ "3:\n"
+ "bc1eqz $f0, 3b\n"
+ "addu $t0, $t1, $t2\n"
+
+ "4:\n"
+ "addu $t0, $t1, $t2\n"
+ "bc1eqz $f0, 4b\n"
+ "nop\n";
+ DriverStr(expected, "SetReorder");
+}
+
+TEST_F(AssemblerMIPS32r6Test, LongBranchReorder) {
+ mips::MipsLabel label;
+ __ SetReorder(true);
+ __ Subu(mips::T0, mips::T1, mips::T2);
+ __ Bc1nez(mips::F0, &label);
+ constexpr uint32_t kAdduCount1 = (1u << 15) + 1;
+ for (uint32_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr uint32_t kAdduCount2 = (1u << 15) + 1;
+ for (uint32_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Subu(mips::T0, mips::T1, mips::T2);
+ __ Bc1eqz(mips::F0, &label);
+
+ uint32_t offset_forward = 2 + kAdduCount1; // 2: account for auipc and jic.
+ offset_forward <<= 2;
+ offset_forward += (offset_forward & 0x8000) << 1; // Account for sign extension in jic.
+
+ uint32_t offset_back = -(kAdduCount2 + 2); // 2: account for subu and bc1nez.
+ offset_back <<= 2;
+ offset_back += (offset_back & 0x8000) << 1; // Account for sign extension in jic.
+
+ std::ostringstream oss;
+ oss <<
+ ".set noreorder\n"
+ "subu $t0, $t1, $t2\n"
+ "bc1eqz $f0, 1f\n"
+ "auipc $at, 0x" << std::hex << High16Bits(offset_forward) << "\n"
+ "jic $at, 0x" << std::hex << Low16Bits(offset_forward) << "\n"
+ "1:\n" <<
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") <<
+ "2:\n" <<
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") <<
+ "subu $t0, $t1, $t2\n"
+ "bc1nez $f0, 3f\n"
+ "auipc $at, 0x" << std::hex << High16Bits(offset_back) << "\n"
+ "jic $at, 0x" << std::hex << Low16Bits(offset_back) << "\n"
+ "3:\n";
+ std::string expected = oss.str();
+ DriverStr(expected, "LongBeqc");
+}
+
// TODO: MipsAssembler::Addiupc
// MipsAssembler::Bc
// MipsAssembler::Jic
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
index 50a8dc2..708bc3d 100644
--- a/compiler/utils/mips/assembler_mips_test.cc
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -2009,14 +2009,17 @@
}
TEST_F(AssemblerMIPSTest, Beq) {
+ __ SetReorder(false);
BranchCondTwoRegsHelper(&mips::MipsAssembler::Beq, "Beq");
}
TEST_F(AssemblerMIPSTest, Bne) {
+ __ SetReorder(false);
BranchCondTwoRegsHelper(&mips::MipsAssembler::Bne, "Bne");
}
TEST_F(AssemblerMIPSTest, Beqz) {
+ __ SetReorder(false);
mips::MipsLabel label;
__ Beqz(mips::A0, &label);
constexpr size_t kAdduCount1 = 63;
@@ -2043,6 +2046,7 @@
}
TEST_F(AssemblerMIPSTest, Bnez) {
+ __ SetReorder(false);
mips::MipsLabel label;
__ Bnez(mips::A0, &label);
constexpr size_t kAdduCount1 = 63;
@@ -2069,22 +2073,27 @@
}
TEST_F(AssemblerMIPSTest, Bltz) {
+ __ SetReorder(false);
BranchCondOneRegHelper(&mips::MipsAssembler::Bltz, "Bltz");
}
TEST_F(AssemblerMIPSTest, Bgez) {
+ __ SetReorder(false);
BranchCondOneRegHelper(&mips::MipsAssembler::Bgez, "Bgez");
}
TEST_F(AssemblerMIPSTest, Blez) {
+ __ SetReorder(false);
BranchCondOneRegHelper(&mips::MipsAssembler::Blez, "Blez");
}
TEST_F(AssemblerMIPSTest, Bgtz) {
+ __ SetReorder(false);
BranchCondOneRegHelper(&mips::MipsAssembler::Bgtz, "Bgtz");
}
TEST_F(AssemblerMIPSTest, Blt) {
+ __ SetReorder(false);
mips::MipsLabel label;
__ Blt(mips::A0, mips::A1, &label);
constexpr size_t kAdduCount1 = 63;
@@ -2113,6 +2122,7 @@
}
TEST_F(AssemblerMIPSTest, Bge) {
+ __ SetReorder(false);
mips::MipsLabel label;
__ Bge(mips::A0, mips::A1, &label);
constexpr size_t kAdduCount1 = 63;
@@ -2141,6 +2151,7 @@
}
TEST_F(AssemblerMIPSTest, Bltu) {
+ __ SetReorder(false);
mips::MipsLabel label;
__ Bltu(mips::A0, mips::A1, &label);
constexpr size_t kAdduCount1 = 63;
@@ -2169,6 +2180,7 @@
}
TEST_F(AssemblerMIPSTest, Bgeu) {
+ __ SetReorder(false);
mips::MipsLabel label;
__ Bgeu(mips::A0, mips::A1, &label);
constexpr size_t kAdduCount1 = 63;
@@ -2197,6 +2209,7 @@
}
TEST_F(AssemblerMIPSTest, Bc1f) {
+ __ SetReorder(false);
mips::MipsLabel label;
__ Bc1f(0, &label);
constexpr size_t kAdduCount1 = 63;
@@ -2223,6 +2236,7 @@
}
TEST_F(AssemblerMIPSTest, Bc1t) {
+ __ SetReorder(false);
mips::MipsLabel label;
__ Bc1t(0, &label);
constexpr size_t kAdduCount1 = 63;
@@ -2331,6 +2345,410 @@
DriverStr(expected, "LoadNearestFarLiteral");
}
+TEST_F(AssemblerMIPSTest, ImpossibleReordering) {
+ mips::MipsLabel label1, label2;
+ __ SetReorder(true);
+
+ __ B(&label1); // No preceding or target instruction for the delay slot.
+
+ __ Addu(mips::T0, mips::T1, mips::T2);
+ __ Bind(&label1);
+ __ B(&label1); // The preceding label prevents moving Addu into the delay slot.
+ __ B(&label1); // No preceding or target instruction for the delay slot.
+
+ __ Addu(mips::T0, mips::T1, mips::T2);
+ __ Beqz(mips::T0, &label1); // T0 dependency.
+
+ __ Or(mips::T1, mips::T2, mips::T3);
+ __ Bne(mips::T2, mips::T1, &label1); // T1 dependency.
+
+ __ And(mips::T0, mips::T1, mips::T2);
+ __ Blt(mips::T1, mips::T0, &label1); // T0 dependency.
+
+ __ Xor(mips::AT, mips::T0, mips::T1);
+ __ Bge(mips::T1, mips::T0, &label1); // AT dependency.
+
+ __ Subu(mips::T0, mips::T1, mips::AT);
+ __ Bltu(mips::T1, mips::T0, &label1); // AT dependency.
+
+ __ ColtS(1, mips::F2, mips::F4);
+ __ Bc1t(1, &label1); // cc1 dependency.
+
+ __ Move(mips::T0, mips::RA);
+ __ Bal(&label1); // RA dependency.
+
+ __ Lw(mips::RA, mips::T0, 0);
+ __ Bal(&label1); // RA dependency.
+
+ __ LlR2(mips::T9, mips::T0, 0);
+ __ Jalr(mips::T9); // T9 dependency.
+
+ __ Sw(mips::RA, mips::T0, 0);
+ __ Jalr(mips::T9); // RA dependency.
+
+ __ Lw(mips::T1, mips::T0, 0);
+ __ Jalr(mips::T1, mips::T9); // T1 dependency.
+
+ __ ScR2(mips::T9, mips::T0, 0);
+ __ Jr(mips::T9); // T9 dependency.
+
+ __ Bind(&label2);
+
+ __ Bnez(mips::T0, &label2); // No preceding instruction for the delay slot.
+
+ __ Bgeu(mips::T1, mips::T0, &label2); // No preceding instruction for the delay slot.
+
+ __ Bc1f(2, &label2); // No preceding instruction for the delay slot.
+
+ __ Bal(&label2); // No preceding instruction for the delay slot.
+
+ __ Jalr(mips::T9); // No preceding instruction for the delay slot.
+
+ __ Addu(mips::T0, mips::T1, mips::T2);
+ __ CodePosition(); // Drops the delay slot candidate (the last instruction).
+ __ Beq(mips::T1, mips::T2, &label2); // No preceding or target instruction for the delay slot.
+
+ std::string expected =
+ ".set noreorder\n"
+ "b 1f\n"
+ "nop\n"
+
+ "addu $t0, $t1, $t2\n"
+ "1:\n"
+ "b 1b\n"
+ "nop\n"
+ "b 1b\n"
+ "nop\n"
+
+ "addu $t0, $t1, $t2\n"
+ "beq $zero, $t0, 1b\n"
+ "nop\n"
+
+ "or $t1, $t2, $t3\n"
+ "bne $t2, $t1, 1b\n"
+ "nop\n"
+
+ "and $t0, $t1, $t2\n"
+ "slt $at, $t1, $t0\n"
+ "bne $zero, $at, 1b\n"
+ "nop\n"
+
+ "xor $at, $t0, $t1\n"
+ "slt $at, $t1, $t0\n"
+ "beq $zero, $at, 1b\n"
+ "nop\n"
+
+ "subu $t0, $t1, $at\n"
+ "sltu $at, $t1, $t0\n"
+ "bne $zero, $at, 1b\n"
+ "nop\n"
+
+ "c.olt.s $fcc1, $f2, $f4\n"
+ "bc1t $fcc1, 1b\n"
+ "nop\n"
+
+ "or $t0, $ra, $zero\n"
+ "bal 1b\n"
+ "nop\n"
+
+ "lw $ra, 0($t0)\n"
+ "bal 1b\n"
+ "nop\n"
+
+ "ll $t9, 0($t0)\n"
+ "jalr $t9\n"
+ "nop\n"
+
+ "sw $ra, 0($t0)\n"
+ "jalr $t9\n"
+ "nop\n"
+
+ "lw $t1, 0($t0)\n"
+ "jalr $t1, $t9\n"
+ "nop\n"
+
+ "sc $t9, 0($t0)\n"
+ "jalr $zero, $t9\n"
+ "nop\n"
+
+ "2:\n"
+
+ "bne $zero, $t0, 2b\n"
+ "nop\n"
+
+ "sltu $at, $t1, $t0\n"
+ "beq $zero, $at, 2b\n"
+ "nop\n"
+
+ "bc1f $fcc2, 2b\n"
+ "nop\n"
+
+ "bal 2b\n"
+ "nop\n"
+
+ "jalr $t9\n"
+ "nop\n"
+
+ "addu $t0, $t1, $t2\n"
+ "beq $t1, $t2, 2b\n"
+ "nop\n";
+ DriverStr(expected, "ImpossibleReordering");
+}
+
+TEST_F(AssemblerMIPSTest, Reordering) {
+ mips::MipsLabel label1, label2;
+ __ SetReorder(true);
+
+ __ Bind(&label1);
+ __ Bind(&label2);
+
+ __ Addu(mips::T0, mips::T1, mips::T2);
+ __ Beqz(mips::T1, &label1);
+
+ __ Or(mips::T1, mips::T2, mips::T3);
+ __ Bne(mips::T2, mips::T3, &label1);
+
+ __ And(mips::T0, mips::T1, mips::T2);
+ __ Blt(mips::T1, mips::T2, &label1);
+
+ __ Xor(mips::T2, mips::T0, mips::T1);
+ __ Bge(mips::T1, mips::T0, &label1);
+
+ __ Subu(mips::T2, mips::T1, mips::T0);
+ __ Bltu(mips::T1, mips::T0, &label1);
+
+ __ ColtS(0, mips::F2, mips::F4);
+ __ Bc1t(1, &label1);
+
+ __ Move(mips::T0, mips::T1);
+ __ Bal(&label1);
+
+ __ LlR2(mips::T1, mips::T0, 0);
+ __ Jalr(mips::T9);
+
+ __ ScR2(mips::T1, mips::T0, 0);
+ __ Jr(mips::T9);
+
+ std::string expected =
+ ".set noreorder\n"
+ "1:\n"
+
+ "beq $zero, $t1, 1b\n"
+ "addu $t0, $t1, $t2\n"
+
+ "bne $t2, $t3, 1b\n"
+ "or $t1, $t2, $t3\n"
+
+ "slt $at, $t1, $t2\n"
+ "bne $zero, $at, 1b\n"
+ "and $t0, $t1, $t2\n"
+
+ "slt $at, $t1, $t0\n"
+ "beq $zero, $at, 1b\n"
+ "xor $t2, $t0, $t1\n"
+
+ "sltu $at, $t1, $t0\n"
+ "bne $zero, $at, 1b\n"
+ "subu $t2, $t1, $t0\n"
+
+ "bc1t $fcc1, 1b\n"
+ "c.olt.s $fcc0, $f2, $f4\n"
+
+ "bal 1b\n"
+ "or $t0, $t1, $zero\n"
+
+ "jalr $t9\n"
+ "ll $t1, 0($t0)\n"
+
+ "jalr $zero, $t9\n"
+ "sc $t1, 0($t0)\n";
+ DriverStr(expected, "Reordering");
+}
+
+TEST_F(AssemblerMIPSTest, AbsorbTargetInstruction) {
+ mips::MipsLabel label1, label2, label3, label4, label5, label6;
+ __ SetReorder(true);
+
+ __ B(&label1);
+ __ Bind(&label1);
+ __ Addu(mips::T0, mips::T1, mips::T2);
+
+ __ Bind(&label2);
+ __ Xor(mips::T0, mips::T1, mips::T2);
+ __ Addu(mips::T0, mips::T1, mips::T2);
+ __ Bind(&label3); // Prevents reordering ADDU above with B below.
+ __ B(&label2);
+
+ __ B(&label4);
+ __ Bind(&label4);
+ __ Addu(mips::T0, mips::T1, mips::T2);
+ __ CodePosition(); // Prevents absorbing ADDU above.
+
+ __ B(&label5);
+ __ Bind(&label5);
+ __ Addu(mips::T0, mips::T1, mips::T2);
+ __ Bind(&label6);
+ __ CodePosition(); // Even across Bind(), CodePosition() prevents absorbing the ADDU above.
+
+ std::string expected =
+ ".set noreorder\n"
+ "b 1f\n"
+ "addu $t0, $t1, $t2\n"
+ "addu $t0, $t1, $t2\n"
+ "1:\n"
+
+ "xor $t0, $t1, $t2\n"
+ "2:\n"
+ "addu $t0, $t1, $t2\n"
+ "b 2b\n"
+ "xor $t0, $t1, $t2\n"
+
+ "b 4f\n"
+ "nop\n"
+ "4:\n"
+ "addu $t0, $t1, $t2\n"
+
+ "b 5f\n"
+ "nop\n"
+ "5:\n"
+ "addu $t0, $t1, $t2\n";
+ DriverStr(expected, "AbsorbTargetInstruction");
+}
+
+TEST_F(AssemblerMIPSTest, SetReorder) {
+ mips::MipsLabel label1, label2, label3, label4, label5, label6;
+
+ __ SetReorder(true);
+ __ Bind(&label1);
+ __ Addu(mips::T0, mips::T1, mips::T2);
+ __ B(&label1);
+ __ B(&label5);
+ __ B(&label6);
+
+ __ SetReorder(false);
+ __ Bind(&label2);
+ __ Addu(mips::T0, mips::T1, mips::T2);
+ __ B(&label2);
+ __ B(&label5);
+ __ B(&label6);
+
+ __ SetReorder(true);
+ __ Bind(&label3);
+ __ Addu(mips::T0, mips::T1, mips::T2);
+ __ B(&label3);
+ __ B(&label5);
+ __ B(&label6);
+
+ __ SetReorder(false);
+ __ Bind(&label4);
+ __ Addu(mips::T0, mips::T1, mips::T2);
+ __ B(&label4);
+ __ B(&label5);
+ __ B(&label6);
+
+ __ SetReorder(true);
+ __ Bind(&label5);
+ __ Subu(mips::T0, mips::T1, mips::T2);
+
+ __ SetReorder(false);
+ __ Bind(&label6);
+ __ Xor(mips::T0, mips::T1, mips::T2);
+
+ std::string expected =
+ ".set noreorder\n"
+ "1:\n"
+ "b 1b\n"
+ "addu $t0, $t1, $t2\n"
+ "b 55f\n"
+ "subu $t0, $t1, $t2\n"
+ "b 6f\n"
+ "nop\n"
+
+ "2:\n"
+ "addu $t0, $t1, $t2\n"
+ "b 2b\n"
+ "nop\n"
+ "b 5f\n"
+ "nop\n"
+ "b 6f\n"
+ "nop\n"
+
+ "3:\n"
+ "b 3b\n"
+ "addu $t0, $t1, $t2\n"
+ "b 55f\n"
+ "subu $t0, $t1, $t2\n"
+ "b 6f\n"
+ "nop\n"
+
+ "4:\n"
+ "addu $t0, $t1, $t2\n"
+ "b 4b\n"
+ "nop\n"
+ "b 5f\n"
+ "nop\n"
+ "b 6f\n"
+ "nop\n"
+
+ "5:\n"
+ "subu $t0, $t1, $t2\n"
+ "55:\n"
+ "6:\n"
+ "xor $t0, $t1, $t2\n";
+ DriverStr(expected, "SetReorder");
+}
+
+TEST_F(AssemblerMIPSTest, LongBranchReorder) {
+ mips::MipsLabel label;
+ __ SetReorder(true);
+ __ Subu(mips::T0, mips::T1, mips::T2);
+ __ B(&label);
+ constexpr uint32_t kAdduCount1 = (1u << 15) + 1;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr uint32_t kAdduCount2 = (1u << 15) + 1;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Subu(mips::T0, mips::T1, mips::T2);
+ __ B(&label);
+
+ // Account for 5 extra instructions: ori, addu, lw, jalr, addiu.
+ uint32_t offset_forward = (kAdduCount1 + 5) * sizeof(uint32_t);
+ // Account for 5 extra instructions: subu, addiu, sw, nal, lui.
+ uint32_t offset_back = -(kAdduCount1 + 5) * sizeof(uint32_t);
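+ // Worked out: kAdduCount1 == kAdduCount2 == (1 << 15) + 1 == 32769, so each offset spans
+ // 32774 instructions, i.e. 131096 bytes. Forward: 131096 == 0x20018, which appears below as
+ // "lui $at, 0x2" / "ori $at, $at, 0x18". Backward: -131096 wraps to 0xfffdffe8 in 32 bits,
+ // which appears as "lui $at, 0xfffd" / "ori $at, $at, 0xffe8".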
+
+ std::ostringstream oss;
+ oss <<
+ ".set noreorder\n"
+ "subu $t0, $t1, $t2\n"
+ "addiu $sp, $sp, -4\n"
+ "sw $ra, 0($sp)\n"
+ "bltzal $zero, .+4\n"
+ "lui $at, 0x" << std::hex << High16Bits(offset_forward) << "\n"
+ "ori $at, $at, 0x" << std::hex << Low16Bits(offset_forward) << "\n"
+ "addu $at, $at, $ra\n"
+ "lw $ra, 0($sp)\n"
+ "jalr $zero, $at\n"
+ "addiu $sp, $sp, 4\n" <<
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") <<
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") <<
+ "subu $t0, $t1, $t2\n"
+ "addiu $sp, $sp, -4\n"
+ "sw $ra, 0($sp)\n"
+ "bltzal $zero, .+4\n"
+ "lui $at, 0x" << std::hex << High16Bits(offset_back) << "\n"
+ "ori $at, $at, 0x" << std::hex << Low16Bits(offset_back) << "\n"
+ "addu $at, $at, $ra\n"
+ "lw $ra, 0($sp)\n"
+ "jalr $zero, $at\n"
+ "addiu $sp, $sp, 4\n";
+ std::string expected = oss.str();
+ DriverStr(expected, "LongBranchReorder");
+}
+
#undef __
} // namespace art
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index f2ef41f..cd30872 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1708,6 +1708,13 @@
}
+void X86Assembler::repne_scasb() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0xAE);
+}
+
+
void X86Assembler::repne_scasw() {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
@@ -1716,6 +1723,13 @@
}
+void X86Assembler::repe_cmpsb() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0xA6);
+}
+
+
void X86Assembler::repe_cmpsw() {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
@@ -1731,6 +1745,13 @@
}
+void X86Assembler::rep_movsb() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0xA4);
+}
+
+
void X86Assembler::rep_movsw() {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
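The new byte-width string instructions differ from the existing word forms only in dropping the 0x66 operand-size prefix; the repeat condition is selected by the REP-family prefix byte (REPNE = 0xF2, REP/REPE = 0xF3). A minimal, illustrative sketch of the byte sequences the new emitters produce:

  #include <cstdint>

  // repne scasb, repe cmpsb and rep movsb byte encodings.
  constexpr uint8_t kRepneScasb[] = {0xF2, 0xAE};
  constexpr uint8_t kRepeCmpsb[]  = {0xF3, 0xA6};
  constexpr uint8_t kRepMovsb[]   = {0xF3, 0xA4};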
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 2ddcd76..9738784 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -592,9 +592,12 @@
void jmp(Label* label);
void jmp(NearLabel* label);
+ void repne_scasb();
void repne_scasw();
+ void repe_cmpsb();
void repe_cmpsw();
void repe_cmpsl();
+ void rep_movsb();
void rep_movsw();
X86Assembler* lock();
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index 61d70d7..9bae6c2 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -207,12 +207,24 @@
DriverStr(expected, "FPUIntegerStore");
}
+TEST_F(AssemblerX86Test, Repnescasb) {
+ GetAssembler()->repne_scasb();
+ const char* expected = "repne scasb\n";
+ DriverStr(expected, "Repnescasb");
+}
+
TEST_F(AssemblerX86Test, Repnescasw) {
GetAssembler()->repne_scasw();
const char* expected = "repne scasw\n";
DriverStr(expected, "Repnescasw");
}
+TEST_F(AssemblerX86Test, Repecmpsb) {
+ GetAssembler()->repe_cmpsb();
+ const char* expected = "repe cmpsb\n";
+ DriverStr(expected, "Repecmpsb");
+}
+
TEST_F(AssemblerX86Test, Repecmpsw) {
GetAssembler()->repe_cmpsw();
const char* expected = "repe cmpsw\n";
@@ -225,10 +237,10 @@
DriverStr(expected, "Repecmpsl");
}
-TEST_F(AssemblerX86Test, RepneScasw) {
- GetAssembler()->repne_scasw();
- const char* expected = "repne scasw\n";
- DriverStr(expected, "repne_scasw");
+TEST_F(AssemblerX86Test, RepMovsb) {
+ GetAssembler()->rep_movsb();
+ const char* expected = "rep movsb\n";
+ DriverStr(expected, "rep_movsb");
}
TEST_F(AssemblerX86Test, RepMovsw) {
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 1f73aa7..e9a0607 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -2325,6 +2325,12 @@
EmitOperand(dst.LowBits(), src);
}
+void X86_64Assembler::repne_scasb() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0xAE);
+}
+
void X86_64Assembler::repne_scasw() {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
@@ -2332,7 +2338,6 @@
EmitUint8(0xAF);
}
-
void X86_64Assembler::repe_cmpsw() {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 3a4bfca..fdd3aa9 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -670,6 +670,7 @@
void rolq(CpuRegister reg, const Immediate& imm);
void rolq(CpuRegister operand, CpuRegister shifter);
+ void repne_scasb();
void repne_scasw();
void repe_cmpsw();
void repe_cmpsl();
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 48a1876..ff01429 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -956,6 +956,12 @@
DriverStr(expected, "xorq");
}
+TEST_F(AssemblerX86_64Test, RepneScasb) {
+ GetAssembler()->repne_scasb();
+ const char* expected = "repne scasb\n";
+ DriverStr(expected, "repne_scasb");
+}
+
TEST_F(AssemblerX86_64Test, RepneScasw) {
GetAssembler()->repne_scasw();
const char* expected = "repne scasw\n";
diff --git a/dalvikvm/Android.bp b/dalvikvm/Android.bp
new file mode 100644
index 0000000..ab645bb
--- /dev/null
+++ b/dalvikvm/Android.bp
@@ -0,0 +1,63 @@
+//
+// Copyright (C) 2013 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+art_cc_binary {
+ name: "dalvikvm",
+ host_supported: true,
+ compile_multilib: "both",
+
+ srcs: ["dalvikvm.cc"],
+ cflags: [
+ "-Wall",
+ "-Werror",
+ "-Wextra",
+ ],
+ include_dirs: ["art/runtime"],
+ shared_libs: [
+ "libnativehelper",
+ ],
+ whole_static_libs: ["libsigchain"],
+ target: {
+ host: {
+ host_ldlibs: [
+ "-ldl",
+ "-lpthread",
+ ],
+ },
+ android: {
+ shared_libs: [
+ "libdl",
+ "liblog",
+ ],
+ ldflags: ["-Wl,--export-dynamic"],
+ },
+ linux: {
+ ldflags: ["-Wl,--export-dynamic"],
+ },
+ },
+
+ multilib: {
+ lib32: {
+ suffix: "32",
+ },
+ lib64: {
+ suffix: "64",
+ },
+ },
+
+ // Create symlink for the primary version target.
+ symlink_preferred_arch: true,
+}
diff --git a/dalvikvm/Android.mk b/dalvikvm/Android.mk
deleted file mode 100644
index 6c0bcb1..0000000
--- a/dalvikvm/Android.mk
+++ /dev/null
@@ -1,87 +0,0 @@
-#
-# Copyright (C) 2013 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-LOCAL_PATH := $(call my-dir)
-
-include art/build/Android.common.mk
-
-dalvikvm_cflags := -Wall -Werror -Wextra
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := dalvikvm
-LOCAL_MODULE_TAGS := optional
-LOCAL_CPP_EXTENSION := cc
-LOCAL_SRC_FILES := dalvikvm.cc
-LOCAL_CFLAGS := $(dalvikvm_cflags)
-LOCAL_C_INCLUDES := art/runtime
-LOCAL_SHARED_LIBRARIES := libdl liblog libnativehelper
-LOCAL_WHOLE_STATIC_LIBRARIES := libsigchain
-LOCAL_LDFLAGS := -Wl,--export-dynamic
-LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
-LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.common.mk
-LOCAL_MULTILIB := both
-LOCAL_MODULE_STEM_32 := dalvikvm32
-LOCAL_MODULE_STEM_64 := dalvikvm64
-LOCAL_NATIVE_COVERAGE := $(ART_COVERAGE)
-include $(BUILD_EXECUTABLE)
-
-# Create symlink for the primary version target.
-include $(BUILD_SYSTEM)/executable_prefer_symlink.mk
-
-ART_TARGET_EXECUTABLES += $(TARGET_OUT_EXECUTABLES)/$(LOCAL_MODULE)
-ART_TARGET_EXECUTABLES += $(TARGET_OUT_EXECUTABLES)/$(LOCAL_MODULE)$(ART_PHONY_TEST_TARGET_SUFFIX)
-ifdef 2ND_ART_PHONY_TEST_TARGET_SUFFIX
- ART_TARGET_EXECUTABLES += $(TARGET_OUT_EXECUTABLES)/$(LOCAL_MODULE)$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
-endif
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := dalvikvm
-LOCAL_MODULE_TAGS := optional
-LOCAL_CLANG := true
-LOCAL_CPP_EXTENSION := cc
-LOCAL_SRC_FILES := dalvikvm.cc
-LOCAL_CFLAGS := $(dalvikvm_cflags)
-LOCAL_C_INCLUDES := art/runtime
-LOCAL_SHARED_LIBRARIES := libnativehelper
-LOCAL_WHOLE_STATIC_LIBRARIES := libsigchain
-LOCAL_LDFLAGS := -ldl -lpthread
-# Mac OS linker doesn't understand --export-dynamic.
-ifneq ($(HOST_OS),darwin)
- LOCAL_LDFLAGS += -Wl,--export-dynamic
-endif
-LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
-LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.common.mk
-LOCAL_IS_HOST_MODULE := true
-LOCAL_MULTILIB := both
-ifdef ART_MULTILIB_OVERRIDE_host
- LOCAL_MULTILIB := $(ART_MULTILIB_OVERRIDE_host)
-endif
-ifeq ($(LOCAL_MULTILIB),both)
-LOCAL_MODULE_STEM_32 := dalvikvm32
-LOCAL_MODULE_STEM_64 := dalvikvm64
-endif
-LOCAL_NATIVE_COVERAGE := $(ART_COVERAGE)
-include $(BUILD_HOST_EXECUTABLE)
-# Create symlink for the primary version target.
-ifeq ($(LOCAL_MULTILIB),both)
-include $(BUILD_SYSTEM)/executable_prefer_symlink.mk
-
-ART_HOST_EXECUTABLES += $(HOST_OUT_EXECUTABLES)/$(LOCAL_MODULE)$(ART_PHONY_TEST_HOST_SUFFIX)
-ifdef 2ND_ART_PHONY_TEST_HOST_SUFFIX
- ART_HOST_EXECUTABLES += $(HOST_OUT_EXECUTABLES)/$(LOCAL_MODULE)$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
-endif
-endif
-ART_HOST_EXECUTABLES += $(HOST_OUT_EXECUTABLES)/$(LOCAL_MODULE)
diff --git a/dex2oat/Android.mk b/dex2oat/Android.mk
index 8a179c1..32424ca 100644
--- a/dex2oat/Android.mk
+++ b/dex2oat/Android.mk
@@ -55,35 +55,19 @@
$(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libcutils libartd-compiler libsigchain,art/compiler,target,debug,$(dex2oat_target_arch)))
endif
-# Note: the order is important because of static linking resolution.
-DEX2OAT_STATIC_DEPENDENCIES := \
- libziparchive \
- libnativehelper \
- libnativebridge \
- libnativeloader \
- libsigchain_dummy \
- liblog \
- libz \
- libbacktrace \
- libcutils \
- libunwindbacktrace \
- libutils \
- libbase \
- liblz4 \
- liblzma
-
-# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
+# We always build dex2oat and dependencies, even if the host build is
+# otherwise disabled, since they are used to cross compile for the target.
ifeq ($(ART_BUILD_HOST_NDEBUG),true)
$(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libcutils libart-compiler libsigchain libziparchive liblz4,art/compiler,host,ndebug,$(dex2oat_host_arch)))
ifeq ($(ART_BUILD_HOST_STATIC),true)
- $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libart libart-compiler libart libvixl-arm libvixl-arm64 $(DEX2OAT_STATIC_DEPENDENCIES),art/compiler,host,ndebug,$(dex2oat_host_arch),static))
+ $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libart libart-compiler libart libvixl-arm libvixl-arm64 $(ART_STATIC_DEPENDENCIES),art/compiler,host,ndebug,$(dex2oat_host_arch),static))
endif
endif
ifeq ($(ART_BUILD_HOST_DEBUG),true)
$(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libcutils libartd-compiler libsigchain libziparchive liblz4,art/compiler,host,debug,$(dex2oat_host_arch)))
ifeq ($(ART_BUILD_HOST_STATIC),true)
- $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libartd libartd-compiler libartd libvixld-arm libvixld-arm64 $(DEX2OAT_STATIC_DEPENDENCIES),art/compiler,host,debug,$(dex2oat_host_arch),static))
+ $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libartd libartd-compiler libartd libvixld-arm libvixld-arm64 $(ART_STATIC_DEPENDENCIES),art/compiler,host,debug,$(dex2oat_host_arch),static))
endif
endif
diff --git a/dexdump/Android.bp b/dexdump/Android.bp
new file mode 100644
index 0000000..e77f809
--- /dev/null
+++ b/dexdump/Android.bp
@@ -0,0 +1,26 @@
+// Copyright (C) 2015 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// TODO(ajcbik): rename dexdump2 into dexdump when Dalvik version is removed
+
+art_cc_binary {
+ name: "dexdump2",
+ host_supported: true,
+ srcs: [
+ "dexdump_main.cc",
+ "dexdump.cc",
+ ],
+ cflags: ["-Wall"],
+ shared_libs: ["libart"],
+}
diff --git a/dexdump/Android.mk b/dexdump/Android.mk
deleted file mode 100755
index ec2529e..0000000
--- a/dexdump/Android.mk
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# TODO(ajcbik): Art-i-fy this makefile
-
-# TODO(ajcbik): rename dexdump2 into dexdump when Dalvik version is removed
-
-LOCAL_PATH:= $(call my-dir)
-
-dexdump_src_files := dexdump_main.cc dexdump.cc
-dexdump_c_includes := art/runtime
-dexdump_libraries := libart
-
-##
-## Build the device command line tool dexdump.
-##
-
-ifneq ($(SDK_ONLY),true) # SDK_only doesn't need device version
-include $(CLEAR_VARS)
-LOCAL_CPP_EXTENSION := cc
-LOCAL_SRC_FILES := $(dexdump_src_files)
-LOCAL_C_INCLUDES := $(dexdump_c_includes)
-LOCAL_CFLAGS += -Wall
-LOCAL_SHARED_LIBRARIES += $(dexdump_libraries)
-LOCAL_MODULE := dexdump2
-include $(BUILD_EXECUTABLE)
-endif # !SDK_ONLY
-
-##
-## Build the host command line tool dexdump.
-##
-
-include $(CLEAR_VARS)
-LOCAL_CPP_EXTENSION := cc
-LOCAL_SRC_FILES := $(dexdump_src_files)
-LOCAL_C_INCLUDES := $(dexdump_c_includes)
-LOCAL_CFLAGS += -Wall
-LOCAL_SHARED_LIBRARIES += $(dexdump_libraries)
-LOCAL_MODULE := dexdump2
-LOCAL_MULTILIB := $(ART_MULTILIB_OVERRIDE_host)
-include $(BUILD_HOST_EXECUTABLE)
diff --git a/dexlayout/Android.bp b/dexlayout/Android.bp
new file mode 100644
index 0000000..852f6c2
--- /dev/null
+++ b/dexlayout/Android.bp
@@ -0,0 +1,25 @@
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+art_cc_binary {
+ name: "dexlayout",
+ host_supported: true,
+ srcs: [
+ "dexlayout_main.cc",
+ "dexlayout.cc",
+ "dex_ir_builder.cc",
+ ],
+ cflags: ["-Wall"],
+ shared_libs: ["libart"],
+}
diff --git a/dexlayout/dex_ir.h b/dexlayout/dex_ir.h
new file mode 100644
index 0000000..cbb4404
--- /dev/null
+++ b/dexlayout/dex_ir.h
@@ -0,0 +1,748 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Header file of an in-memory representation of DEX files.
+ */
+
+#ifndef ART_DEXLAYOUT_DEX_IR_H_
+#define ART_DEXLAYOUT_DEX_IR_H_
+
+#include <vector>
+#include <stdint.h>
+
+#include "dex_file-inl.h"
+
+namespace art {
+namespace dex_ir {
+
+// Forward declarations for classes used in containers or pointed to.
+class AnnotationsDirectoryItem;
+class AnnotationSetItem;
+class ArrayItem;
+class ClassData;
+class ClassDef;
+class CodeItem;
+class DebugInfoItem;
+class FieldId;
+class FieldItem;
+class Header;
+class MapList;
+class MapItem;
+class MethodId;
+class MethodItem;
+class ProtoId;
+class StringId;
+class TryItem;
+class TypeId;
+
+// Visitor support
+class AbstractDispatcher {
+ public:
+ AbstractDispatcher() = default;
+ virtual ~AbstractDispatcher() { }
+
+ virtual void Dispatch(Header* header) = 0;
+ virtual void Dispatch(const StringId* string_id) = 0;
+ virtual void Dispatch(const TypeId* type_id) = 0;
+ virtual void Dispatch(const ProtoId* proto_id) = 0;
+ virtual void Dispatch(const FieldId* field_id) = 0;
+ virtual void Dispatch(const MethodId* method_id) = 0;
+ virtual void Dispatch(ClassData* class_data) = 0;
+ virtual void Dispatch(ClassDef* class_def) = 0;
+ virtual void Dispatch(FieldItem* field_item) = 0;
+ virtual void Dispatch(MethodItem* method_item) = 0;
+ virtual void Dispatch(ArrayItem* array_item) = 0;
+ virtual void Dispatch(CodeItem* code_item) = 0;
+ virtual void Dispatch(TryItem* try_item) = 0;
+ virtual void Dispatch(DebugInfoItem* debug_info_item) = 0;
+ virtual void Dispatch(AnnotationSetItem* annotation_set_item) = 0;
+ virtual void Dispatch(AnnotationsDirectoryItem* annotations_directory_item) = 0;
+ virtual void Dispatch(MapList* map_list) = 0;
+ virtual void Dispatch(MapItem* map_item) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AbstractDispatcher);
+};
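+// A concrete visitor subclasses this dispatcher and overrides every Dispatch() overload;
+// IR nodes hand themselves back to it through their Accept() methods.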
+
+// Collections become owners of the objects added by moving them into unique pointers.
+template<class T> class CollectionWithOffset {
+ public:
+ CollectionWithOffset() = default;
+ std::vector<std::unique_ptr<T>>& Collection() { return collection_; }
+ // Read-time support methods
+ void AddWithPosition(uint32_t position, T* object) {
+ collection_.push_back(std::unique_ptr<T>(object));
+ collection_.back()->SetOffset(position);
+ }
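+ // Example (illustrative only): AddWithPosition(disk_offset, new StringId(cstr)) hands
+ // ownership of the new StringId to the collection and stamps disk_offset on the item.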
+ // Ordinary object insertion into collection.
+ void Insert(T object ATTRIBUTE_UNUSED) {
+ // TODO(sehr): add ordered insertion support.
+ UNIMPLEMENTED(FATAL) << "Insertion not ready";
+ }
+ uint32_t GetOffset() const { return offset_; }
+ void SetOffset(uint32_t new_offset) { offset_ = new_offset; }
+ uint32_t Size() const { return collection_.size(); }
+
+ private:
+ std::vector<std::unique_ptr<T>> collection_;
+ uint32_t offset_ = 0;
+ DISALLOW_COPY_AND_ASSIGN(CollectionWithOffset);
+};
+
+class Item {
+ public:
+ virtual ~Item() { }
+
+ uint32_t GetOffset() const { return offset_; }
+ void SetOffset(uint32_t offset) { offset_ = offset; }
+
+ protected:
+ uint32_t offset_ = 0;
+};
+
+class Header : public Item {
+ public:
+ Header(const uint8_t* magic,
+ uint32_t checksum,
+ const uint8_t* signature,
+ uint32_t endian_tag,
+ uint32_t file_size,
+ uint32_t header_size,
+ uint32_t link_size,
+ uint32_t link_offset,
+ uint32_t data_size,
+ uint32_t data_offset)
+ : checksum_(checksum),
+ endian_tag_(endian_tag),
+ file_size_(file_size),
+ header_size_(header_size),
+ link_size_(link_size),
+ link_offset_(link_offset),
+ data_size_(data_size),
+ data_offset_(data_offset) {
+ memcpy(magic_, magic, sizeof(magic_));
+ memcpy(signature_, signature, sizeof(signature_));
+ }
+ ~Header() OVERRIDE { }
+
+ const uint8_t* Magic() const { return magic_; }
+ uint32_t Checksum() const { return checksum_; }
+ const uint8_t* Signature() const { return signature_; }
+ uint32_t EndianTag() const { return endian_tag_; }
+ uint32_t FileSize() const { return file_size_; }
+ uint32_t HeaderSize() const { return header_size_; }
+ uint32_t LinkSize() const { return link_size_; }
+ uint32_t LinkOffset() const { return link_offset_; }
+ uint32_t DataSize() const { return data_size_; }
+ uint32_t DataOffset() const { return data_offset_; }
+
+ void SetChecksum(uint32_t new_checksum) { checksum_ = new_checksum; }
+ void SetSignature(const uint8_t* new_signature) {
+ memcpy(signature_, new_signature, sizeof(signature_));
+ }
+ void SetFileSize(uint32_t new_file_size) { file_size_ = new_file_size; }
+ void SetHeaderSize(uint32_t new_header_size) { header_size_ = new_header_size; }
+ void SetLinkSize(uint32_t new_link_size) { link_size_ = new_link_size; }
+ void SetLinkOffset(uint32_t new_link_offset) { link_offset_ = new_link_offset; }
+ void SetDataSize(uint32_t new_data_size) { data_size_ = new_data_size; }
+ void SetDataOffset(uint32_t new_data_offset) { data_offset_ = new_data_offset; }
+
+ // Collections.
+ std::vector<std::unique_ptr<StringId>>& StringIds() { return string_ids_.Collection(); }
+ std::vector<std::unique_ptr<TypeId>>& TypeIds() { return type_ids_.Collection(); }
+ std::vector<std::unique_ptr<ProtoId>>& ProtoIds() { return proto_ids_.Collection(); }
+ std::vector<std::unique_ptr<FieldId>>& FieldIds() { return field_ids_.Collection(); }
+ std::vector<std::unique_ptr<MethodId>>& MethodIds() { return method_ids_.Collection(); }
+ std::vector<std::unique_ptr<ClassDef>>& ClassDefs() { return class_defs_.Collection(); }
+ uint32_t StringIdsOffset() const { return string_ids_.GetOffset(); }
+ uint32_t TypeIdsOffset() const { return type_ids_.GetOffset(); }
+ uint32_t ProtoIdsOffset() const { return proto_ids_.GetOffset(); }
+ uint32_t FieldIdsOffset() const { return field_ids_.GetOffset(); }
+ uint32_t MethodIdsOffset() const { return method_ids_.GetOffset(); }
+ uint32_t ClassDefsOffset() const { return class_defs_.GetOffset(); }
+ void SetStringIdsOffset(uint32_t new_offset) { string_ids_.SetOffset(new_offset); }
+ void SetTypeIdsOffset(uint32_t new_offset) { type_ids_.SetOffset(new_offset); }
+ void SetProtoIdsOffset(uint32_t new_offset) { proto_ids_.SetOffset(new_offset); }
+ void SetFieldIdsOffset(uint32_t new_offset) { field_ids_.SetOffset(new_offset); }
+ void SetMethodIdsOffset(uint32_t new_offset) { method_ids_.SetOffset(new_offset); }
+ void SetClassDefsOffset(uint32_t new_offset) { class_defs_.SetOffset(new_offset); }
+ uint32_t StringIdsSize() const { return string_ids_.Size(); }
+ uint32_t TypeIdsSize() const { return type_ids_.Size(); }
+ uint32_t ProtoIdsSize() const { return proto_ids_.Size(); }
+ uint32_t FieldIdsSize() const { return field_ids_.Size(); }
+ uint32_t MethodIdsSize() const { return method_ids_.Size(); }
+ uint32_t ClassDefsSize() const { return class_defs_.Size(); }
+
+ TypeId* GetTypeIdOrNullPtr(uint16_t index) {
+ return index == DexFile::kDexNoIndex16 ? nullptr : TypeIds()[index].get();
+ }
+
+ StringId* GetStringIdOrNullPtr(uint32_t index) {
+ return index == DexFile::kDexNoIndex ? nullptr : StringIds()[index].get();
+ }
+
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+ uint8_t magic_[8];
+ uint32_t checksum_;
+ uint8_t signature_[DexFile::kSha1DigestSize];
+ uint32_t endian_tag_;
+ uint32_t file_size_;
+ uint32_t header_size_;
+ uint32_t link_size_;
+ uint32_t link_offset_;
+ uint32_t data_size_;
+ uint32_t data_offset_;
+
+ CollectionWithOffset<StringId> string_ids_;
+ CollectionWithOffset<TypeId> type_ids_;
+ CollectionWithOffset<ProtoId> proto_ids_;
+ CollectionWithOffset<FieldId> field_ids_;
+ CollectionWithOffset<MethodId> method_ids_;
+ CollectionWithOffset<ClassDef> class_defs_;
+ DISALLOW_COPY_AND_ASSIGN(Header);
+};
+
+class StringId : public Item {
+ public:
+ explicit StringId(const char* data) : data_(strdup(data)) { }
+ ~StringId() OVERRIDE { }
+
+ const char* Data() const { return data_.get(); }
+
+ void Accept(AbstractDispatcher* dispatch) const { dispatch->Dispatch(this); }
+
+ private:
+ std::unique_ptr<const char> data_;
+ DISALLOW_COPY_AND_ASSIGN(StringId);
+};
+
+class TypeId : public Item {
+ public:
+ explicit TypeId(StringId* string_id) : string_id_(string_id) { }
+ ~TypeId() OVERRIDE { }
+
+ StringId* GetStringId() const { return string_id_; }
+
+ void Accept(AbstractDispatcher* dispatch) const { dispatch->Dispatch(this); }
+
+ private:
+ StringId* string_id_;
+ DISALLOW_COPY_AND_ASSIGN(TypeId);
+};
+
+using TypeIdVector = std::vector<const TypeId*>;
+
+class ProtoId : public Item {
+ public:
+ ProtoId(const StringId* shorty, const TypeId* return_type, TypeIdVector* parameters)
+ : shorty_(shorty), return_type_(return_type), parameters_(parameters) { }
+ ~ProtoId() OVERRIDE { }
+
+ const StringId* Shorty() const { return shorty_; }
+ const TypeId* ReturnType() const { return return_type_; }
+ const std::vector<const TypeId*>& Parameters() const { return *parameters_; }
+
+ void Accept(AbstractDispatcher* dispatch) const { dispatch->Dispatch(this); }
+
+ private:
+ const StringId* shorty_;
+ const TypeId* return_type_;
+ std::unique_ptr<TypeIdVector> parameters_;
+ DISALLOW_COPY_AND_ASSIGN(ProtoId);
+};
+
+class FieldId : public Item {
+ public:
+ FieldId(const TypeId* klass, const TypeId* type, const StringId* name)
+ : class_(klass), type_(type), name_(name) { }
+ ~FieldId() OVERRIDE { }
+
+ const TypeId* Class() const { return class_; }
+ const TypeId* Type() const { return type_; }
+ const StringId* Name() const { return name_; }
+
+ void Accept(AbstractDispatcher* dispatch) const { dispatch->Dispatch(this); }
+
+ private:
+ const TypeId* class_;
+ const TypeId* type_;
+ const StringId* name_;
+ DISALLOW_COPY_AND_ASSIGN(FieldId);
+};
+
+class MethodId : public Item {
+ public:
+ MethodId(const TypeId* klass, const ProtoId* proto, const StringId* name)
+ : class_(klass), proto_(proto), name_(name) { }
+ ~MethodId() OVERRIDE { }
+
+ const TypeId* Class() const { return class_; }
+ const ProtoId* Proto() const { return proto_; }
+ const StringId* Name() const { return name_; }
+
+ void Accept(AbstractDispatcher* dispatch) const { dispatch->Dispatch(this); }
+
+ private:
+ const TypeId* class_;
+ const ProtoId* proto_;
+ const StringId* name_;
+ DISALLOW_COPY_AND_ASSIGN(MethodId);
+};
+
+class FieldItem : public Item {
+ public:
+ FieldItem(uint32_t access_flags, const FieldId* field_id)
+ : access_flags_(access_flags), field_id_(field_id) { }
+ ~FieldItem() OVERRIDE { }
+
+ uint32_t GetAccessFlags() const { return access_flags_; }
+ const FieldId* GetFieldId() const { return field_id_; }
+
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+ uint32_t access_flags_;
+ const FieldId* field_id_;
+ DISALLOW_COPY_AND_ASSIGN(FieldItem);
+};
+
+using FieldItemVector = std::vector<std::unique_ptr<FieldItem>>;
+
+class MethodItem : public Item {
+ public:
+ MethodItem(uint32_t access_flags, const MethodId* method_id, const CodeItem* code)
+ : access_flags_(access_flags), method_id_(method_id), code_(code) { }
+ ~MethodItem() OVERRIDE { }
+
+ uint32_t GetAccessFlags() const { return access_flags_; }
+ const MethodId* GetMethodId() const { return method_id_; }
+ const CodeItem* GetCodeItem() const { return code_.get(); }
+
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+ uint32_t access_flags_;
+ const MethodId* method_id_;
+ std::unique_ptr<const CodeItem> code_;
+ DISALLOW_COPY_AND_ASSIGN(MethodItem);
+};
+
+using MethodItemVector = std::vector<std::unique_ptr<MethodItem>>;
+
+class ArrayItem : public Item {
+ public:
+ class NameValuePair {
+ public:
+ NameValuePair(StringId* name, ArrayItem* value)
+ : name_(name), value_(value) { }
+
+ StringId* Name() const { return name_; }
+ ArrayItem* Value() const { return value_.get(); }
+
+ private:
+ StringId* name_;
+ std::unique_ptr<ArrayItem> value_;
+ DISALLOW_COPY_AND_ASSIGN(NameValuePair);
+ };
+
+ struct ArrayItemVariant {
+ public:
+ union {
+ bool bool_val_;
+ int8_t byte_val_;
+ int16_t short_val_;
+ uint16_t char_val_;
+ int32_t int_val_;
+ int64_t long_val_;
+ float float_val_;
+ double double_val_;
+ StringId* string_val_;
+ FieldId* field_val_;
+ MethodId* method_val_;
+ } u_;
+ std::unique_ptr<std::vector<std::unique_ptr<ArrayItem>>> annotation_array_val_;
+ struct {
+ StringId* string_;
+ std::unique_ptr<std::vector<std::unique_ptr<NameValuePair>>> array_;
+ } annotation_annotation_val_;
+ };
+
+ explicit ArrayItem(uint8_t type) : type_(type) { }
+ ~ArrayItem() OVERRIDE { }
+
+ int8_t Type() const { return type_; }
+ bool GetBoolean() const { return item_.u_.bool_val_; }
+ int8_t GetByte() const { return item_.u_.byte_val_; }
+ int16_t GetShort() const { return item_.u_.short_val_; }
+ uint16_t GetChar() const { return item_.u_.char_val_; }
+ int32_t GetInt() const { return item_.u_.int_val_; }
+ int64_t GetLong() const { return item_.u_.long_val_; }
+ float GetFloat() const { return item_.u_.float_val_; }
+ double GetDouble() const { return item_.u_.double_val_; }
+ StringId* GetStringId() const { return item_.u_.string_val_; }
+ FieldId* GetFieldId() const { return item_.u_.field_val_; }
+ MethodId* GetMethodId() const { return item_.u_.method_val_; }
+ std::vector<std::unique_ptr<ArrayItem>>* GetAnnotationArray() const {
+ return item_.annotation_array_val_.get();
+ }
+ StringId* GetAnnotationAnnotationString() const {
+ return item_.annotation_annotation_val_.string_;
+ }
+ std::vector<std::unique_ptr<NameValuePair>>* GetAnnotationAnnotationNameValuePairArray() const {
+ return item_.annotation_annotation_val_.array_.get();
+ }
+ // Used to construct the item union. Ugly, but necessary.
+ ArrayItemVariant* GetArrayItemVariant() { return &item_; }
+
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+ uint8_t type_;
+ ArrayItemVariant item_;
+ DISALLOW_COPY_AND_ASSIGN(ArrayItem);
+};
+
+using ArrayItemVector = std::vector<std::unique_ptr<ArrayItem>>;
+
+class ClassData : public Item {
+ public:
+ ClassData(FieldItemVector* static_fields,
+ FieldItemVector* instance_fields,
+ MethodItemVector* direct_methods,
+ MethodItemVector* virtual_methods)
+ : static_fields_(static_fields),
+ instance_fields_(instance_fields),
+ direct_methods_(direct_methods),
+ virtual_methods_(virtual_methods) { }
+
+ ~ClassData() OVERRIDE = default;
+ FieldItemVector* StaticFields() { return static_fields_.get(); }
+ FieldItemVector* InstanceFields() { return instance_fields_.get(); }
+ MethodItemVector* DirectMethods() { return direct_methods_.get(); }
+ MethodItemVector* VirtualMethods() { return virtual_methods_.get(); }
+
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+ std::unique_ptr<FieldItemVector> static_fields_;
+ std::unique_ptr<FieldItemVector> instance_fields_;
+ std::unique_ptr<MethodItemVector> direct_methods_;
+ std::unique_ptr<MethodItemVector> virtual_methods_;
+ DISALLOW_COPY_AND_ASSIGN(ClassData);
+};
+
+class ClassDef : public Item {
+ public:
+ ClassDef(const TypeId* class_type,
+ uint32_t access_flags,
+ const TypeId* superclass,
+ TypeIdVector* interfaces,
+ uint32_t interfaces_offset,
+ const StringId* source_file,
+ AnnotationsDirectoryItem* annotations,
+ ArrayItemVector* static_values,
+ ClassData* class_data)
+ : class_type_(class_type),
+ access_flags_(access_flags),
+ superclass_(superclass),
+ interfaces_(interfaces),
+ interfaces_offset_(interfaces_offset),
+ source_file_(source_file),
+ annotations_(annotations),
+ static_values_(static_values),
+ class_data_(class_data) { }
+
+ ~ClassDef() OVERRIDE { }
+
+ const TypeId* ClassType() const { return class_type_; }
+ uint32_t GetAccessFlags() const { return access_flags_; }
+ const TypeId* Superclass() const { return superclass_; }
+ TypeIdVector* Interfaces() { return interfaces_.get(); }
+ uint32_t InterfacesOffset() const { return interfaces_offset_; }
+ void SetInterfacesOffset(uint32_t new_offset) { interfaces_offset_ = new_offset; }
+ const StringId* SourceFile() const { return source_file_; }
+ AnnotationsDirectoryItem* Annotations() const { return annotations_.get(); }
+ ArrayItemVector* StaticValues() { return static_values_.get(); }
+ ClassData* GetClassData() { return class_data_.get(); }
+
+ MethodItem* GenerateMethodItem(Header& header, ClassDataItemIterator& cdii);
+
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+ const TypeId* class_type_;
+ uint32_t access_flags_;
+ const TypeId* superclass_;
+ std::unique_ptr<TypeIdVector> interfaces_;
+ uint32_t interfaces_offset_;
+ const StringId* source_file_;
+ std::unique_ptr<AnnotationsDirectoryItem> annotations_;
+ std::unique_ptr<ArrayItemVector> static_values_;
+ std::unique_ptr<ClassData> class_data_;
+ DISALLOW_COPY_AND_ASSIGN(ClassDef);
+};
+
+class CatchHandler {
+ public:
+ CatchHandler(const TypeId* type_id, uint32_t address) : type_id_(type_id), address_(address) { }
+
+ const TypeId* GetTypeId() const { return type_id_; }
+ uint32_t GetAddress() const { return address_; }
+
+ private:
+ const TypeId* type_id_;
+ uint32_t address_;
+ DISALLOW_COPY_AND_ASSIGN(CatchHandler);
+};
+
+using CatchHandlerVector = std::vector<std::unique_ptr<const CatchHandler>>;
+
+class TryItem : public Item {
+ public:
+ TryItem(uint32_t start_addr, uint16_t insn_count, CatchHandlerVector* handlers)
+ : start_addr_(start_addr), insn_count_(insn_count), handlers_(handlers) { }
+ ~TryItem() OVERRIDE { }
+
+ uint32_t StartAddr() const { return start_addr_; }
+ uint16_t InsnCount() const { return insn_count_; }
+ const CatchHandlerVector& GetHandlers() const { return *handlers_.get(); }
+
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+ uint32_t start_addr_;
+ uint16_t insn_count_;
+ std::unique_ptr<CatchHandlerVector> handlers_;
+ DISALLOW_COPY_AND_ASSIGN(TryItem);
+};
+
+using TryItemVector = std::vector<std::unique_ptr<const TryItem>>;
+
+class CodeItem : public Item {
+ public:
+ CodeItem(uint16_t registers_size,
+ uint16_t ins_size,
+ uint16_t outs_size,
+ DebugInfoItem* debug_info,
+ uint32_t insns_size,
+ uint16_t* insns,
+ TryItemVector* tries)
+ : registers_size_(registers_size),
+ ins_size_(ins_size),
+ outs_size_(outs_size),
+ debug_info_(debug_info),
+ insns_size_(insns_size),
+ insns_(insns),
+ tries_(tries) { }
+
+ ~CodeItem() OVERRIDE { }
+
+ uint16_t RegistersSize() const { return registers_size_; }
+ uint16_t InsSize() const { return ins_size_; }
+ uint16_t OutsSize() const { return outs_size_; }
+ uint16_t TriesSize() const { return tries_ == nullptr ? 0 : tries_->size(); }
+ DebugInfoItem* DebugInfo() const { return debug_info_.get(); }
+ uint32_t InsnsSize() const { return insns_size_; }
+ uint16_t* Insns() const { return insns_.get(); }
+ TryItemVector* Tries() const { return tries_.get(); }
+
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+ uint16_t registers_size_;
+ uint16_t ins_size_;
+ uint16_t outs_size_;
+ std::unique_ptr<DebugInfoItem> debug_info_;
+ uint32_t insns_size_;
+ std::unique_ptr<uint16_t[]> insns_;
+ std::unique_ptr<TryItemVector> tries_;
+ DISALLOW_COPY_AND_ASSIGN(CodeItem);
+};
+
+
+struct PositionInfo {
+ PositionInfo(uint32_t address, uint32_t line) : address_(address), line_(line) { }
+
+ uint32_t address_;
+ uint32_t line_;
+};
+
+using PositionInfoVector = std::vector<std::unique_ptr<PositionInfo>>;
+
+struct LocalInfo {
+ LocalInfo(const char* name,
+ const char* descriptor,
+ const char* signature,
+ uint32_t start_address,
+ uint32_t end_address,
+ uint16_t reg)
+ : name_(name),
+ descriptor_(descriptor),
+ signature_(signature),
+ start_address_(start_address),
+ end_address_(end_address),
+ reg_(reg) { }
+
+ std::string name_;
+ std::string descriptor_;
+ std::string signature_;
+ uint32_t start_address_;
+ uint32_t end_address_;
+ uint16_t reg_;
+};
+
+using LocalInfoVector = std::vector<std::unique_ptr<LocalInfo>>;
+
+class DebugInfoItem : public Item {
+ public:
+ DebugInfoItem() = default;
+
+ PositionInfoVector& GetPositionInfo() { return positions_; }
+ LocalInfoVector& GetLocalInfo() { return locals_; }
+
+ private:
+ PositionInfoVector positions_;
+ LocalInfoVector locals_;
+ DISALLOW_COPY_AND_ASSIGN(DebugInfoItem);
+};
+
+class AnnotationItem {
+ public:
+ AnnotationItem(uint8_t visibility, ArrayItem* item) : visibility_(visibility), item_(item) { }
+
+ uint8_t GetVisibility() const { return visibility_; }
+ ArrayItem* GetItem() const { return item_.get(); }
+
+ private:
+ uint8_t visibility_;
+ std::unique_ptr<ArrayItem> item_;
+ DISALLOW_COPY_AND_ASSIGN(AnnotationItem);
+};
+
+using AnnotationItemVector = std::vector<std::unique_ptr<AnnotationItem>>;
+
+class AnnotationSetItem : public Item {
+ public:
+ explicit AnnotationSetItem(AnnotationItemVector* items) : items_(items) { }
+ ~AnnotationSetItem() OVERRIDE { }
+
+ AnnotationItemVector* GetItems() { return items_.get(); }
+
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+ std::unique_ptr<AnnotationItemVector> items_;
+ DISALLOW_COPY_AND_ASSIGN(AnnotationSetItem);
+};
+
+using AnnotationSetItemVector = std::vector<std::unique_ptr<AnnotationSetItem>>;
+
+class FieldAnnotation {
+ public:
+ FieldAnnotation(FieldId* field_id, AnnotationSetItem* annotation_set_item)
+ : field_id_(field_id), annotation_set_item_(annotation_set_item) { }
+
+ FieldId* GetFieldId() const { return field_id_; }
+ AnnotationSetItem* GetAnnotationSetItem() const { return annotation_set_item_.get(); }
+
+ private:
+ FieldId* field_id_;
+ std::unique_ptr<AnnotationSetItem> annotation_set_item_;
+ DISALLOW_COPY_AND_ASSIGN(FieldAnnotation);
+};
+
+using FieldAnnotationVector = std::vector<std::unique_ptr<FieldAnnotation>>;
+
+class MethodAnnotation {
+ public:
+ MethodAnnotation(MethodId* method_id, AnnotationSetItem* annotation_set_item)
+ : method_id_(method_id), annotation_set_item_(annotation_set_item) { }
+
+ MethodId* GetMethodId() const { return method_id_; }
+ AnnotationSetItem* GetAnnotationSetItem() const { return annotation_set_item_.get(); }
+
+ private:
+ MethodId* method_id_;
+ std::unique_ptr<AnnotationSetItem> annotation_set_item_;
+ DISALLOW_COPY_AND_ASSIGN(MethodAnnotation);
+};
+
+using MethodAnnotationVector = std::vector<std::unique_ptr<MethodAnnotation>>;
+
+class ParameterAnnotation {
+ public:
+ ParameterAnnotation(MethodId* method_id, AnnotationSetItemVector* annotations)
+ : method_id_(method_id), annotations_(annotations) { }
+
+ MethodId* GetMethodId() const { return method_id_; }
+ AnnotationSetItemVector* GetAnnotations() { return annotations_.get(); }
+
+ private:
+ MethodId* method_id_;
+ std::unique_ptr<AnnotationSetItemVector> annotations_;
+ DISALLOW_COPY_AND_ASSIGN(ParameterAnnotation);
+};
+
+using ParameterAnnotationVector = std::vector<std::unique_ptr<ParameterAnnotation>>;
+
+class AnnotationsDirectoryItem : public Item {
+ public:
+ AnnotationsDirectoryItem(AnnotationSetItem* class_annotation,
+ FieldAnnotationVector* field_annotations,
+ MethodAnnotationVector* method_annotations,
+ ParameterAnnotationVector* parameter_annotations)
+ : class_annotation_(class_annotation),
+ field_annotations_(field_annotations),
+ method_annotations_(method_annotations),
+ parameter_annotations_(parameter_annotations) { }
+
+ AnnotationSetItem* GetClassAnnotation() const { return class_annotation_.get(); }
+ FieldAnnotationVector* GetFieldAnnotations() { return field_annotations_.get(); }
+ MethodAnnotationVector* GetMethodAnnotations() { return method_annotations_.get(); }
+ ParameterAnnotationVector* GetParameterAnnotations() { return parameter_annotations_.get(); }
+
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+ std::unique_ptr<AnnotationSetItem> class_annotation_;
+ std::unique_ptr<FieldAnnotationVector> field_annotations_;
+ std::unique_ptr<MethodAnnotationVector> method_annotations_;
+ std::unique_ptr<ParameterAnnotationVector> parameter_annotations_;
+ DISALLOW_COPY_AND_ASSIGN(AnnotationsDirectoryItem);
+};
+
+// TODO(sehr): implement MapList.
+class MapList : public Item {
+ public:
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MapList);
+};
+
+class MapItem : public Item {
+ public:
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MapItem);
+};
+
+} // namespace dex_ir
+} // namespace art
+
+#endif // ART_DEXLAYOUT_DEX_IR_H_
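As a quick orientation for the interface above, a hypothetical walk over a populated dex_ir::Header (for example, one produced by the builder that follows) could print every class descriptor. Illustrative sketch only, assuming dex_ir.h is included and the header is fully built:

  #include <cstdio>
  #include <memory>
  #include <vector>

  // Prints the descriptor string of every class definition in the IR.
  void PrintClassDescriptors(art::dex_ir::Header* header) {
    for (const std::unique_ptr<art::dex_ir::ClassDef>& class_def : header->ClassDefs()) {
      const art::dex_ir::TypeId* type = class_def->ClassType();
      std::printf("%s\n", type->GetStringId()->Data());
    }
  }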
diff --git a/dexlayout/dex_ir_builder.cc b/dexlayout/dex_ir_builder.cc
new file mode 100644
index 0000000..30f57d9
--- /dev/null
+++ b/dexlayout/dex_ir_builder.cc
@@ -0,0 +1,507 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Implementation file of an in-memory representation of DEX files.
+ */
+
+#include <stdint.h>
+#include <vector>
+
+#include "dex_ir_builder.h"
+
+namespace art {
+namespace dex_ir {
+
+namespace {
+
+static uint64_t ReadVarWidth(const uint8_t** data, uint8_t length, bool sign_extend) {
+ uint64_t value = 0;
+ for (uint32_t i = 0; i <= length; i++) {
+ value |= static_cast<uint64_t>(*(*data)++) << (i * 8);
+ }
+ if (sign_extend) {
+ int shift = (7 - length) * 8;
+ return (static_cast<int64_t>(value) << shift) >> shift;
+ }
+ return value;
+}
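+// Example: with length == 1 this reads two bytes, so input 0xE8 0x03 yields 0x3E8 (1000);
+// with sign_extend, 0xFF 0xFF yields an all-ones result, i.e. -1 as a signed value.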
+
+// Prototype to break cyclic dependency.
+void ReadArrayItemVariant(Header& header,
+ const uint8_t** data,
+ uint8_t type,
+ uint8_t length,
+ ArrayItem::ArrayItemVariant* item);
+
+ArrayItem* ReadArrayItem(Header& header, const uint8_t** data, uint8_t type, uint8_t length) {
+ ArrayItem* item = new ArrayItem(type);
+ ReadArrayItemVariant(header, data, type, length, item->GetArrayItemVariant());
+ return item;
+}
+
+ArrayItem* ReadArrayItem(Header& header, const uint8_t** data) {
+ const uint8_t encoded_value = *(*data)++;
+ const uint8_t type = encoded_value & 0x1f;
+ ArrayItem* item = new ArrayItem(type);
+ ReadArrayItemVariant(header, data, type, encoded_value >> 5, item->GetArrayItemVariant());
+ return item;
+}
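+// The leading byte of an encoded_value packs the value type into its low five bits and the
+// value argument into its high three bits, which is what the "& 0x1f" and ">> 5" above unpack.
+// For most value types the argument is the value size in bytes minus one (ReadVarWidth reads
+// length + 1 bytes accordingly); for booleans it holds the value itself.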
+
+void ReadArrayItemVariant(Header& header,
+ const uint8_t** data,
+ uint8_t type,
+ uint8_t length,
+ ArrayItem::ArrayItemVariant* item) {
+ switch (type) {
+ case DexFile::kDexAnnotationByte:
+ item->u_.byte_val_ = static_cast<int8_t>(ReadVarWidth(data, length, false));
+ break;
+ case DexFile::kDexAnnotationShort:
+ item->u_.short_val_ = static_cast<int16_t>(ReadVarWidth(data, length, true));
+ break;
+ case DexFile::kDexAnnotationChar:
+ item->u_.char_val_ = static_cast<uint16_t>(ReadVarWidth(data, length, false));
+ break;
+ case DexFile::kDexAnnotationInt:
+ item->u_.int_val_ = static_cast<int32_t>(ReadVarWidth(data, length, true));
+ break;
+ case DexFile::kDexAnnotationLong:
+ item->u_.long_val_ = static_cast<int64_t>(ReadVarWidth(data, length, true));
+ break;
+ case DexFile::kDexAnnotationFloat: {
+ // Fill on right.
+ union {
+ float f;
+ uint32_t data;
+ } conv;
+ conv.data = static_cast<uint32_t>(ReadVarWidth(data, length, false)) << (3 - length) * 8;
+ item->u_.float_val_ = conv.f;
+ break;
+ }
+ case DexFile::kDexAnnotationDouble: {
+ // Fill on right.
+ union {
+ double d;
+ uint64_t data;
+ } conv;
+ conv.data = ReadVarWidth(data, length, false) << (7 - length) * 8;
+ item->u_.double_val_ = conv.d;
+ break;
+ }
+ case DexFile::kDexAnnotationString: {
+ const uint32_t string_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
+ item->u_.string_val_ = header.StringIds()[string_index].get();
+ break;
+ }
+ case DexFile::kDexAnnotationType: {
+ const uint32_t string_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
+ item->u_.string_val_ = header.TypeIds()[string_index]->GetStringId();
+ break;
+ }
+ case DexFile::kDexAnnotationField:
+ case DexFile::kDexAnnotationEnum: {
+ const uint32_t field_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
+ item->u_.field_val_ = header.FieldIds()[field_index].get();
+ break;
+ }
+ case DexFile::kDexAnnotationMethod: {
+ const uint32_t method_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
+ item->u_.method_val_ = header.MethodIds()[method_index].get();
+ break;
+ }
+ case DexFile::kDexAnnotationArray: {
+ item->annotation_array_val_.reset(new ArrayItemVector());
+ // Decode all elements.
+ const uint32_t size = DecodeUnsignedLeb128(data);
+ for (uint32_t i = 0; i < size; i++) {
+ item->annotation_array_val_->push_back(
+ std::unique_ptr<ArrayItem>(ReadArrayItem(header, data)));
+ }
+ break;
+ }
+ case DexFile::kDexAnnotationAnnotation: {
+ const uint32_t type_idx = DecodeUnsignedLeb128(data);
+ item->annotation_annotation_val_.string_ = header.TypeIds()[type_idx]->GetStringId();
+ item->annotation_annotation_val_.array_.reset(
+ new std::vector<std::unique_ptr<ArrayItem::NameValuePair>>());
+ // Decode all name=value pairs.
+ const uint32_t size = DecodeUnsignedLeb128(data);
+ for (uint32_t i = 0; i < size; i++) {
+ const uint32_t name_index = DecodeUnsignedLeb128(data);
+ item->annotation_annotation_val_.array_->push_back(
+ std::unique_ptr<ArrayItem::NameValuePair>(
+ new ArrayItem::NameValuePair(header.StringIds()[name_index].get(),
+ ReadArrayItem(header, data))));
+ }
+ break;
+ }
+ case DexFile::kDexAnnotationNull:
+ break;
+ case DexFile::kDexAnnotationBoolean:
+ item->u_.bool_val_ = (length != 0);
+ break;
+ default:
+ break;
+ }
+}
+
+static bool GetPositionsCb(void* context, const DexFile::PositionInfo& entry) {
+ DebugInfoItem* debug_info = reinterpret_cast<DebugInfoItem*>(context);
+ PositionInfoVector& positions = debug_info->GetPositionInfo();
+ positions.push_back(std::unique_ptr<PositionInfo>(new PositionInfo(entry.address_, entry.line_)));
+ return false;
+}
+
+static void GetLocalsCb(void* context, const DexFile::LocalInfo& entry) {
+ DebugInfoItem* debug_info = reinterpret_cast<DebugInfoItem*>(context);
+ LocalInfoVector& locals = debug_info->GetLocalInfo();
+ const char* name = entry.name_ != nullptr ? entry.name_ : "(null)";
+ const char* signature = entry.signature_ != nullptr ? entry.signature_ : "";
+ locals.push_back(std::unique_ptr<LocalInfo>(
+ new LocalInfo(name, entry.descriptor_, signature, entry.start_address_,
+ entry.end_address_, entry.reg_)));
+}
+
+CodeItem* ReadCodeItem(const DexFile& dex_file,
+ const DexFile::CodeItem& disk_code_item,
+ Header& header) {
+ uint16_t registers_size = disk_code_item.registers_size_;
+ uint16_t ins_size = disk_code_item.ins_size_;
+ uint16_t outs_size = disk_code_item.outs_size_;
+ uint32_t tries_size = disk_code_item.tries_size_;
+
+ const uint8_t* debug_info_stream = dex_file.GetDebugInfoStream(&disk_code_item);
+ DebugInfoItem* debug_info = nullptr;
+ if (debug_info_stream != nullptr) {
+ debug_info = new DebugInfoItem();
+ }
+
+ uint32_t insns_size = disk_code_item.insns_size_in_code_units_;
+ uint16_t* insns = new uint16_t[insns_size];
+ memcpy(insns, disk_code_item.insns_, insns_size * sizeof(uint16_t));
+
+ TryItemVector* tries = nullptr;
+ if (tries_size > 0) {
+ tries = new TryItemVector();
+ for (uint32_t i = 0; i < tries_size; ++i) {
+ const DexFile::TryItem* disk_try_item = dex_file.GetTryItems(disk_code_item, i);
+ uint32_t start_addr = disk_try_item->start_addr_;
+ uint16_t insn_count = disk_try_item->insn_count_;
+ CatchHandlerVector* handlers = new CatchHandlerVector();
+ for (CatchHandlerIterator it(disk_code_item, *disk_try_item); it.HasNext(); it.Next()) {
+ const uint16_t type_index = it.GetHandlerTypeIndex();
+ const TypeId* type_id = header.GetTypeIdOrNullPtr(type_index);
+ handlers->push_back(std::unique_ptr<const CatchHandler>(
+ new CatchHandler(type_id, it.GetHandlerAddress())));
+ }
+ TryItem* try_item = new TryItem(start_addr, insn_count, handlers);
+ tries->push_back(std::unique_ptr<const TryItem>(try_item));
+ }
+ }
+ return new CodeItem(registers_size, ins_size, outs_size, debug_info, insns_size, insns, tries);
+}
+
+MethodItem* GenerateMethodItem(const DexFile& dex_file,
+ dex_ir::Header& header,
+ ClassDataItemIterator& cdii) {
+ MethodId* method_item = header.MethodIds()[cdii.GetMemberIndex()].get();
+ uint32_t access_flags = cdii.GetRawMemberAccessFlags();
+ const DexFile::CodeItem* disk_code_item = cdii.GetMethodCodeItem();
+ CodeItem* code_item = nullptr;
+ DebugInfoItem* debug_info = nullptr;
+ if (disk_code_item != nullptr) {
+ code_item = ReadCodeItem(dex_file, *disk_code_item, header);
+ code_item->SetOffset(cdii.GetMethodCodeItemOffset());
+ debug_info = code_item->DebugInfo();
+ }
+ if (debug_info != nullptr) {
+ bool is_static = (access_flags & kAccStatic) != 0;
+ dex_file.DecodeDebugLocalInfo(
+ disk_code_item, is_static, cdii.GetMemberIndex(), GetLocalsCb, debug_info);
+ dex_file.DecodeDebugPositionInfo(disk_code_item, GetPositionsCb, debug_info);
+ }
+ return new MethodItem(access_flags, method_item, code_item);
+}
+
+AnnotationSetItem* ReadAnnotationSetItem(const DexFile& dex_file,
+ const DexFile::AnnotationSetItem& disk_annotations_item,
+ Header& header) {
+ if (disk_annotations_item.size_ == 0) {
+ return nullptr;
+ }
+ AnnotationItemVector* items = new AnnotationItemVector();
+ for (uint32_t i = 0; i < disk_annotations_item.size_; ++i) {
+ const DexFile::AnnotationItem* annotation =
+ dex_file.GetAnnotationItem(&disk_annotations_item, i);
+ if (annotation == nullptr) {
+ continue;
+ }
+ uint8_t visibility = annotation->visibility_;
+ const uint8_t* annotation_data = annotation->annotation_;
+ ArrayItem* array_item =
+ ReadArrayItem(header, &annotation_data, DexFile::kDexAnnotationAnnotation, 0);
+ items->push_back(std::unique_ptr<AnnotationItem>(new AnnotationItem(visibility, array_item)));
+ }
+ return new AnnotationSetItem(items);
+}
+
+ParameterAnnotation* ReadParameterAnnotation(
+ const DexFile& dex_file,
+ MethodId* method_id,
+ const DexFile::AnnotationSetRefList* annotation_set_ref_list,
+ Header& header) {
+ AnnotationSetItemVector* annotations = new AnnotationSetItemVector();
+ for (uint32_t i = 0; i < annotation_set_ref_list->size_; ++i) {
+ const DexFile::AnnotationSetItem* annotation_set_item =
+ dex_file.GetSetRefItemItem(&annotation_set_ref_list->list_[i]);
+ annotations->push_back(std::unique_ptr<AnnotationSetItem>(
+ ReadAnnotationSetItem(dex_file, *annotation_set_item, header)));
+ }
+ return new ParameterAnnotation(method_id, annotations);
+}
+
+AnnotationsDirectoryItem* ReadAnnotationsDirectoryItem(
+ const DexFile& dex_file,
+ const DexFile::AnnotationsDirectoryItem* disk_annotations_item,
+ Header& header) {
+ const DexFile::AnnotationSetItem* class_set_item =
+ dex_file.GetClassAnnotationSet(disk_annotations_item);
+ AnnotationSetItem* class_annotation = nullptr;
+ if (class_set_item != nullptr) {
+ class_annotation = ReadAnnotationSetItem(dex_file, *class_set_item, header);
+ }
+ const DexFile::FieldAnnotationsItem* fields =
+ dex_file.GetFieldAnnotations(disk_annotations_item);
+ FieldAnnotationVector* field_annotations = nullptr;
+ if (fields != nullptr) {
+ field_annotations = new FieldAnnotationVector();
+ for (uint32_t i = 0; i < disk_annotations_item->fields_size_; ++i) {
+ FieldId* field_id = header.FieldIds()[fields[i].field_idx_].get();
+ const DexFile::AnnotationSetItem* field_set_item =
+ dex_file.GetFieldAnnotationSetItem(fields[i]);
+ AnnotationSetItem* annotation_set_item =
+ ReadAnnotationSetItem(dex_file, *field_set_item, header);
+ field_annotations->push_back(std::unique_ptr<FieldAnnotation>(
+ new FieldAnnotation(field_id, annotation_set_item)));
+ }
+ }
+ const DexFile::MethodAnnotationsItem* methods =
+ dex_file.GetMethodAnnotations(disk_annotations_item);
+ MethodAnnotationVector* method_annotations = nullptr;
+ if (methods != nullptr) {
+ method_annotations = new MethodAnnotationVector();
+ for (uint32_t i = 0; i < disk_annotations_item->methods_size_; ++i) {
+ MethodId* method_id = header.MethodIds()[methods[i].method_idx_].get();
+ const DexFile::AnnotationSetItem* method_set_item =
+ dex_file.GetMethodAnnotationSetItem(methods[i]);
+ AnnotationSetItem* annotation_set_item =
+ ReadAnnotationSetItem(dex_file, *method_set_item, header);
+ method_annotations->push_back(std::unique_ptr<MethodAnnotation>(
+ new MethodAnnotation(method_id, annotation_set_item)));
+ }
+ }
+ const DexFile::ParameterAnnotationsItem* parameters =
+ dex_file.GetParameterAnnotations(disk_annotations_item);
+ ParameterAnnotationVector* parameter_annotations = nullptr;
+ if (parameters != nullptr) {
+ parameter_annotations = new ParameterAnnotationVector();
+ for (uint32_t i = 0; i < disk_annotations_item->parameters_size_; ++i) {
+ MethodId* method_id = header.MethodIds()[parameters[i].method_idx_].get();
+ const DexFile::AnnotationSetRefList* list =
+ dex_file.GetParameterAnnotationSetRefList(&parameters[i]);
+ parameter_annotations->push_back(std::unique_ptr<ParameterAnnotation>(
+ ReadParameterAnnotation(dex_file, method_id, list, header)));
+ }
+ }
+
+ return new AnnotationsDirectoryItem(class_annotation,
+ field_annotations,
+ method_annotations,
+ parameter_annotations);
+}
+
+ClassDef* ReadClassDef(const DexFile& dex_file,
+ const DexFile::ClassDef& disk_class_def,
+ Header& header) {
+ const TypeId* class_type = header.TypeIds()[disk_class_def.class_idx_].get();
+ uint32_t access_flags = disk_class_def.access_flags_;
+ const TypeId* superclass = header.GetTypeIdOrNullPtr(disk_class_def.superclass_idx_);
+
+ TypeIdVector* interfaces = nullptr;
+ const DexFile::TypeList* type_list = dex_file.GetInterfacesList(disk_class_def);
+ uint32_t interfaces_offset = disk_class_def.interfaces_off_;
+ if (type_list != nullptr) {
+ interfaces = new TypeIdVector();
+ for (uint32_t index = 0; index < type_list->Size(); ++index) {
+ interfaces->push_back(header.TypeIds()[type_list->GetTypeItem(index).type_idx_].get());
+ }
+ }
+ const StringId* source_file = header.GetStringIdOrNullPtr(disk_class_def.source_file_idx_);
+ // Annotations.
+ AnnotationsDirectoryItem* annotations = nullptr;
+ const DexFile::AnnotationsDirectoryItem* disk_annotations_directory_item =
+ dex_file.GetAnnotationsDirectory(disk_class_def);
+ if (disk_annotations_directory_item != nullptr) {
+ annotations = ReadAnnotationsDirectoryItem(dex_file, disk_annotations_directory_item, header);
+ annotations->SetOffset(disk_class_def.annotations_off_);
+ }
+ // Static field initializers.
+ ArrayItemVector* static_values = nullptr;
+ const uint8_t* static_data = dex_file.GetEncodedStaticFieldValuesArray(disk_class_def);
+ if (static_data != nullptr) {
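+ // An encoded_array begins with a ULEB128 element count, followed by that
+ // many encoded_value entries; each ReadArrayItem call below consumes one
+ // entry and advances static_data past it.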
+ uint32_t static_value_count = static_data == nullptr ? 0 : DecodeUnsignedLeb128(&static_data);
+ if (static_value_count > 0) {
+ static_values = new ArrayItemVector();
+ for (uint32_t i = 0; i < static_value_count; ++i) {
+ static_values->push_back(std::unique_ptr<ArrayItem>(ReadArrayItem(header, &static_data)));
+ }
+ }
+ }
+ // Read the fields and methods defined by the class, resolving the circular reference from those
+ // to classes by setting class at the same time.
+ const uint8_t* encoded_data = dex_file.GetClassData(disk_class_def);
+ ClassData* class_data = nullptr;
+ if (encoded_data != nullptr) {
+ uint32_t offset = disk_class_def.class_data_off_;
+ ClassDataItemIterator cdii(dex_file, encoded_data);
+ // Static fields.
+ FieldItemVector* static_fields = new FieldItemVector();
+ for (uint32_t i = 0; cdii.HasNextStaticField(); i++, cdii.Next()) {
+ FieldId* field_item = header.FieldIds()[cdii.GetMemberIndex()].get();
+ uint32_t access_flags = cdii.GetRawMemberAccessFlags();
+ static_fields->push_back(std::unique_ptr<FieldItem>(new FieldItem(access_flags, field_item)));
+ }
+ // Instance fields.
+ FieldItemVector* instance_fields = new FieldItemVector();
+ for (uint32_t i = 0; cdii.HasNextInstanceField(); i++, cdii.Next()) {
+ FieldId* field_item = header.FieldIds()[cdii.GetMemberIndex()].get();
+ uint32_t access_flags = cdii.GetRawMemberAccessFlags();
+ instance_fields->push_back(
+ std::unique_ptr<FieldItem>(new FieldItem(access_flags, field_item)));
+ }
+ // Direct methods.
+ MethodItemVector* direct_methods = new MethodItemVector();
+ for (uint32_t i = 0; cdii.HasNextDirectMethod(); i++, cdii.Next()) {
+ direct_methods->push_back(
+ std::unique_ptr<MethodItem>(GenerateMethodItem(dex_file, header, cdii)));
+ }
+ // Virtual methods.
+ MethodItemVector* virtual_methods = new MethodItemVector();
+ for (uint32_t i = 0; cdii.HasNextVirtualMethod(); i++, cdii.Next()) {
+ virtual_methods->push_back(
+ std::unique_ptr<MethodItem>(GenerateMethodItem(dex_file, header, cdii)));
+ }
+ class_data = new ClassData(static_fields, instance_fields, direct_methods, virtual_methods);
+ class_data->SetOffset(offset);
+ }
+ return new ClassDef(class_type,
+ access_flags,
+ superclass,
+ interfaces,
+ interfaces_offset,
+ source_file,
+ annotations,
+ static_values,
+ class_data);
+}
+
+} // namespace
+
+Header* DexIrBuilder(const DexFile& dex_file) {
+ const DexFile::Header& disk_header = dex_file.GetHeader();
+ Header* header = new Header(disk_header.magic_,
+ disk_header.checksum_,
+ disk_header.signature_,
+ disk_header.endian_tag_,
+ disk_header.file_size_,
+ disk_header.header_size_,
+ disk_header.link_size_,
+ disk_header.link_off_,
+ disk_header.data_size_,
+ disk_header.data_off_);
+ // Walk the rest of the header fields.
+ // StringId table.
+ std::vector<std::unique_ptr<StringId>>& string_ids = header->StringIds();
+ header->SetStringIdsOffset(disk_header.string_ids_off_);
+ for (uint32_t i = 0; i < dex_file.NumStringIds(); ++i) {
+ const DexFile::StringId& disk_string_id = dex_file.GetStringId(i);
+ StringId* string_id = new StringId(dex_file.GetStringData(disk_string_id));
+ string_id->SetOffset(i);
+ string_ids.push_back(std::unique_ptr<StringId>(string_id));
+ }
+ // TypeId table.
+ std::vector<std::unique_ptr<TypeId>>& type_ids = header->TypeIds();
+ header->SetTypeIdsOffset(disk_header.type_ids_off_);
+ for (uint32_t i = 0; i < dex_file.NumTypeIds(); ++i) {
+ const DexFile::TypeId& disk_type_id = dex_file.GetTypeId(i);
+ TypeId* type_id = new TypeId(header->StringIds()[disk_type_id.descriptor_idx_].get());
+ type_id->SetOffset(i);
+ type_ids.push_back(std::unique_ptr<TypeId>(type_id));
+ }
+ // ProtoId table.
+ std::vector<std::unique_ptr<ProtoId>>& proto_ids = header->ProtoIds();
+ header->SetProtoIdsOffset(disk_header.proto_ids_off_);
+ for (uint32_t i = 0; i < dex_file.NumProtoIds(); ++i) {
+ const DexFile::ProtoId& disk_proto_id = dex_file.GetProtoId(i);
+ // Build the parameter type vector.
+ TypeIdVector* parameters = new TypeIdVector();
+ DexFileParameterIterator dfpi(dex_file, disk_proto_id);
+ while (dfpi.HasNext()) {
+ parameters->push_back(header->TypeIds()[dfpi.GetTypeIdx()].get());
+ dfpi.Next();
+ }
+ ProtoId* proto_id = new ProtoId(header->StringIds()[disk_proto_id.shorty_idx_].get(),
+ header->TypeIds()[disk_proto_id.return_type_idx_].get(),
+ parameters);
+ proto_id->SetOffset(i);
+ proto_ids.push_back(std::unique_ptr<ProtoId>(proto_id));
+ }
+ // FieldId table.
+ std::vector<std::unique_ptr<FieldId>>& field_ids = header->FieldIds();
+ header->SetFieldIdsOffset(disk_header.field_ids_off_);
+ for (uint32_t i = 0; i < dex_file.NumFieldIds(); ++i) {
+ const DexFile::FieldId& disk_field_id = dex_file.GetFieldId(i);
+ FieldId* field_id = new FieldId(header->TypeIds()[disk_field_id.class_idx_].get(),
+ header->TypeIds()[disk_field_id.type_idx_].get(),
+ header->StringIds()[disk_field_id.name_idx_].get());
+ field_id->SetOffset(i);
+ field_ids.push_back(std::unique_ptr<FieldId>(field_id));
+ }
+ // MethodId table.
+ std::vector<std::unique_ptr<MethodId>>& method_ids = header->MethodIds();
+ header->SetMethodIdsOffset(disk_header.method_ids_off_);
+ for (uint32_t i = 0; i < dex_file.NumMethodIds(); ++i) {
+ const DexFile::MethodId& disk_method_id = dex_file.GetMethodId(i);
+ MethodId* method_id = new MethodId(header->TypeIds()[disk_method_id.class_idx_].get(),
+ header->ProtoIds()[disk_method_id.proto_idx_].get(),
+ header->StringIds()[disk_method_id.name_idx_].get());
+ method_id->SetOffset(i);
+ method_ids.push_back(std::unique_ptr<MethodId>(method_id));
+ }
+ // ClassDef table.
+ std::vector<std::unique_ptr<ClassDef>>& class_defs = header->ClassDefs();
+ header->SetClassDefsOffset(disk_header.class_defs_off_);
+ for (uint32_t i = 0; i < dex_file.NumClassDefs(); ++i) {
+ const DexFile::ClassDef& disk_class_def = dex_file.GetClassDef(i);
+ ClassDef* class_def = ReadClassDef(dex_file, disk_class_def, *header);
+ class_def->SetOffset(i);
+ class_defs.push_back(std::unique_ptr<ClassDef>(class_def));
+ }
+
+ return header;
+}
+
+} // namespace dex_ir
+} // namespace art
diff --git a/dexlayout/dex_ir_builder.h b/dexlayout/dex_ir_builder.h
new file mode 100644
index 0000000..c53157b
--- /dev/null
+++ b/dexlayout/dex_ir_builder.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Header file of an in-memory representation of DEX files.
+ */
+
+#ifndef ART_DEXLAYOUT_DEX_IR_BUILDER_H_
+#define ART_DEXLAYOUT_DEX_IR_BUILDER_H_
+
+#include "dex_ir.h"
+
+namespace art {
+namespace dex_ir {
+
+dex_ir::Header* DexIrBuilder(const DexFile& dex_file);
+
+} // namespace dex_ir
+} // namespace art
+
+#endif // ART_DEXLAYOUT_DEX_IR_BUILDER_H_
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
new file mode 100644
index 0000000..3a3f417
--- /dev/null
+++ b/dexlayout/dexlayout.cc
@@ -0,0 +1,1557 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Implementation file of the dexlayout utility.
+ *
+ * This is a tool to read dex files into an internal representation,
+ * reorganize the representation, and emit dex files with a better
+ * file layout.
+ */
+
+#include "dexlayout.h"
+
+#include <inttypes.h>
+#include <stdio.h>
+
+#include <iostream>
+#include <memory>
+#include <sstream>
+#include <vector>
+
+#include "dex_ir_builder.h"
+#include "dex_file-inl.h"
+#include "dex_instruction-inl.h"
+#include "utils.h"
+
+namespace art {
+
+/*
+ * Options parsed in main driver.
+ */
+struct Options options_;
+
+/*
+ * Output file. Defaults to stdout.
+ */
+FILE* out_file_ = stdout;
+
+/*
+ * Flags for use with createAccessFlagStr().
+ */
+enum AccessFor {
+ kAccessForClass = 0, kAccessForMethod = 1, kAccessForField = 2, kAccessForMAX
+};
+const int kNumFlags = 18;
+
+/*
+ * Gets 2 little-endian bytes.
+ */
+static inline uint16_t Get2LE(unsigned char const* src) {
+ return src[0] | (src[1] << 8);
+}
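+
+// For example, the byte pair {0x34, 0x12} decodes to 0x1234 with the helper
+// above; dex code units are stored little-endian.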
+
+/*
+ * Converts a type descriptor to human-readable "dotted" form. For
+ * example, "Ljava/lang/String;" becomes "java.lang.String", and
+ * "[I" becomes "int[]". Also converts '$' to '.', which means this
+ * form can't be converted back to a descriptor.
+ */
+static std::string DescriptorToDotWrapper(const char* descriptor) {
+ std::string result = DescriptorToDot(descriptor);
+ size_t found = result.find('$');
+ while (found != std::string::npos) {
+ result[found] = '.';
+ found = result.find('$', found);
+ }
+ return result;
+}
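+
+// As a worked example (an assumed descriptor, for illustration only):
+// "Lcom/example/Outer$Inner;" comes back from the wrapper above as
+// "com.example.Outer.Inner", which is why the result can no longer be mapped
+// back to a unique descriptor.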
+
+/*
+ * Converts the class name portion of a type descriptor to human-readable
+ * "dotted" form. For example, "Ljava/lang/String;" becomes "String".
+ */
+static std::string DescriptorClassToDot(const char* str) {
+ std::string descriptor(str);
+ // Reduce to just the class name prefix.
+ size_t last_slash = descriptor.rfind('/');
+ if (last_slash == std::string::npos) {
+ last_slash = 0;
+ }
+ // Start past the '/' or 'L'.
+ last_slash++;
+
+ // Copy class name over, trimming trailing ';'.
+ size_t size = descriptor.size() - 1 - last_slash;
+ std::string result(descriptor.substr(last_slash, size));
+
+ // Replace '$' with '.'.
+ size_t dollar_sign = result.find('$');
+ while (dollar_sign != std::string::npos) {
+ result[dollar_sign] = '.';
+ dollar_sign = result.find('$', dollar_sign);
+ }
+
+ return result;
+}
+
+/*
+ * Returns string representing the boolean value.
+ */
+static const char* StrBool(bool val) {
+ return val ? "true" : "false";
+}
+
+/*
+ * Returns a quoted string representing the boolean value.
+ */
+static const char* QuotedBool(bool val) {
+ return val ? "\"true\"" : "\"false\"";
+}
+
+/*
+ * Returns a quoted string representing the access flags.
+ */
+static const char* QuotedVisibility(uint32_t access_flags) {
+ if (access_flags & kAccPublic) {
+ return "\"public\"";
+ } else if (access_flags & kAccProtected) {
+ return "\"protected\"";
+ } else if (access_flags & kAccPrivate) {
+ return "\"private\"";
+ } else {
+ return "\"package\"";
+ }
+}
+
+/*
+ * Counts the number of '1' bits in a word.
+ */
+static int CountOnes(uint32_t val) {
+ val = val - ((val >> 1) & 0x55555555);
+ val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
+ return (((val + (val >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
+}
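+
+// A quick sanity check on the bit trick above (illustrative value only):
+// CountOnes(0x13) sees binary 10011 and returns 3, which is exactly the
+// number of flag strings the caller will emit for that access word.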
+
+/*
+ * Creates a new string with human-readable access flags.
+ *
+ * In the base language the access_flags fields are type uint16_t; in Dalvik they're uint32_t.
+ */
+static char* CreateAccessFlagStr(uint32_t flags, AccessFor for_what) {
+ static const char* kAccessStrings[kAccessForMAX][kNumFlags] = {
+ {
+ "PUBLIC", /* 0x00001 */
+ "PRIVATE", /* 0x00002 */
+ "PROTECTED", /* 0x00004 */
+ "STATIC", /* 0x00008 */
+ "FINAL", /* 0x00010 */
+ "?", /* 0x00020 */
+ "?", /* 0x00040 */
+ "?", /* 0x00080 */
+ "?", /* 0x00100 */
+ "INTERFACE", /* 0x00200 */
+ "ABSTRACT", /* 0x00400 */
+ "?", /* 0x00800 */
+ "SYNTHETIC", /* 0x01000 */
+ "ANNOTATION", /* 0x02000 */
+ "ENUM", /* 0x04000 */
+ "?", /* 0x08000 */
+ "VERIFIED", /* 0x10000 */
+ "OPTIMIZED", /* 0x20000 */
+ }, {
+ "PUBLIC", /* 0x00001 */
+ "PRIVATE", /* 0x00002 */
+ "PROTECTED", /* 0x00004 */
+ "STATIC", /* 0x00008 */
+ "FINAL", /* 0x00010 */
+ "SYNCHRONIZED", /* 0x00020 */
+ "BRIDGE", /* 0x00040 */
+ "VARARGS", /* 0x00080 */
+ "NATIVE", /* 0x00100 */
+ "?", /* 0x00200 */
+ "ABSTRACT", /* 0x00400 */
+ "STRICT", /* 0x00800 */
+ "SYNTHETIC", /* 0x01000 */
+ "?", /* 0x02000 */
+ "?", /* 0x04000 */
+ "MIRANDA", /* 0x08000 */
+ "CONSTRUCTOR", /* 0x10000 */
+ "DECLARED_SYNCHRONIZED", /* 0x20000 */
+ }, {
+ "PUBLIC", /* 0x00001 */
+ "PRIVATE", /* 0x00002 */
+ "PROTECTED", /* 0x00004 */
+ "STATIC", /* 0x00008 */
+ "FINAL", /* 0x00010 */
+ "?", /* 0x00020 */
+ "VOLATILE", /* 0x00040 */
+ "TRANSIENT", /* 0x00080 */
+ "?", /* 0x00100 */
+ "?", /* 0x00200 */
+ "?", /* 0x00400 */
+ "?", /* 0x00800 */
+ "SYNTHETIC", /* 0x01000 */
+ "?", /* 0x02000 */
+ "ENUM", /* 0x04000 */
+ "?", /* 0x08000 */
+ "?", /* 0x10000 */
+ "?", /* 0x20000 */
+ },
+ };
+
+ // Allocate enough storage to hold the expected number of strings,
+ // plus a space between each. We over-allocate, using the longest
+ // string above as the base metric.
+ const int kLongest = 21; // The strlen of longest string above.
+ const int count = CountOnes(flags);
+ char* str;
+ char* cp;
+ cp = str = reinterpret_cast<char*>(malloc(count * (kLongest + 1) + 1));
+
+ for (int i = 0; i < kNumFlags; i++) {
+ if (flags & 0x01) {
+ const char* accessStr = kAccessStrings[for_what][i];
+ const int len = strlen(accessStr);
+ if (cp != str) {
+ *cp++ = ' ';
+ }
+ memcpy(cp, accessStr, len);
+ cp += len;
+ }
+ flags >>= 1;
+ } // for
+
+ *cp = '\0';
+ return str;
+}
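+
+// Example use, mirroring the callers further down; the returned buffer comes
+// from malloc() and must be released by the caller:
+//   char* s = CreateAccessFlagStr(kAccPublic | kAccFinal, kAccessForClass);
+//   // s now holds "PUBLIC FINAL"
+//   free(s);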
+
+static std::string GetSignatureForProtoId(const dex_ir::ProtoId* proto) {
+ if (proto == nullptr) {
+ return "<no signature>";
+ }
+
+ const std::vector<const dex_ir::TypeId*>& params = proto->Parameters();
+ std::string result("(");
+ for (uint32_t i = 0; i < params.size(); ++i) {
+ result += params[i]->GetStringId()->Data();
+ }
+ result += ")";
+ result += proto->ReturnType()->GetStringId()->Data();
+ return result;
+}
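+
+// For instance, a proto taking a single java.lang.String parameter and
+// returning void is rendered by the helper above as "(Ljava/lang/String;)V".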
+
+/*
+ * Copies character data from "data" to "out", converting non-ASCII values
+ * to fprintf format chars or an ASCII filler ('.' or '?').
+ *
+ * The output buffer must be able to hold (2*len)+1 bytes. The result is
+ * NULL-terminated.
+ */
+static void Asciify(char* out, const unsigned char* data, size_t len) {
+ while (len--) {
+ if (*data < 0x20) {
+ // Could do more here, but we don't need them yet.
+ switch (*data) {
+ case '\0':
+ *out++ = '\\';
+ *out++ = '0';
+ break;
+ case '\n':
+ *out++ = '\\';
+ *out++ = 'n';
+ break;
+ default:
+ *out++ = '.';
+ break;
+ } // switch
+ } else if (*data >= 0x80) {
+ *out++ = '?';
+ } else {
+ *out++ = *data;
+ }
+ data++;
+ } // while
+ *out = '\0';
+}
+
+/*
+ * Dumps a string value with some escape characters.
+ */
+static void DumpEscapedString(const char* p) {
+ fputs("\"", out_file_);
+ for (; *p; p++) {
+ switch (*p) {
+ case '\\':
+ fputs("\\\\", out_file_);
+ break;
+ case '\"':
+ fputs("\\\"", out_file_);
+ break;
+ case '\t':
+ fputs("\\t", out_file_);
+ break;
+ case '\n':
+ fputs("\\n", out_file_);
+ break;
+ case '\r':
+ fputs("\\r", out_file_);
+ break;
+ default:
+ putc(*p, out_file_);
+ } // switch
+ } // for
+ fputs("\"", out_file_);
+}
+
+/*
+ * Dumps a string as an XML attribute value.
+ */
+static void DumpXmlAttribute(const char* p) {
+ for (; *p; p++) {
+ switch (*p) {
+ case '&':
+ fputs("&", out_file_);
+ break;
+ case '<':
+ fputs("<", out_file_);
+ break;
+ case '>':
+ fputs(">", out_file_);
+ break;
+ case '"':
+ fputs(""", out_file_);
+ break;
+ case '\t':
+ fputs("	", out_file_);
+ break;
+ case '\n':
+ fputs("
", out_file_);
+ break;
+ case '\r':
+ fputs("
", out_file_);
+ break;
+ default:
+ putc(*p, out_file_);
+ } // switch
+ } // for
+}
+
+/*
+ * Dumps encoded value.
+ */
+static void DumpEncodedValue(const dex_ir::ArrayItem* data) {
+ switch (data->Type()) {
+ case DexFile::kDexAnnotationByte:
+ fprintf(out_file_, "%" PRId8, data->GetByte());
+ break;
+ case DexFile::kDexAnnotationShort:
+ fprintf(out_file_, "%" PRId16, data->GetShort());
+ break;
+ case DexFile::kDexAnnotationChar:
+ fprintf(out_file_, "%" PRIu16, data->GetChar());
+ break;
+ case DexFile::kDexAnnotationInt:
+ fprintf(out_file_, "%" PRId32, data->GetInt());
+ break;
+ case DexFile::kDexAnnotationLong:
+ fprintf(out_file_, "%" PRId64, data->GetLong());
+ break;
+ case DexFile::kDexAnnotationFloat: {
+ fprintf(out_file_, "%g", data->GetFloat());
+ break;
+ }
+ case DexFile::kDexAnnotationDouble: {
+ fprintf(out_file_, "%g", data->GetDouble());
+ break;
+ }
+ case DexFile::kDexAnnotationString: {
+ dex_ir::StringId* string_id = data->GetStringId();
+ if (options_.output_format_ == kOutputPlain) {
+ DumpEscapedString(string_id->Data());
+ } else {
+ DumpXmlAttribute(string_id->Data());
+ }
+ break;
+ }
+ case DexFile::kDexAnnotationType: {
+ dex_ir::StringId* string_id = data->GetStringId();
+ fputs(string_id->Data(), out_file_);
+ break;
+ }
+ case DexFile::kDexAnnotationField:
+ case DexFile::kDexAnnotationEnum: {
+ dex_ir::FieldId* field_id = data->GetFieldId();
+ fputs(field_id->Name()->Data(), out_file_);
+ break;
+ }
+ case DexFile::kDexAnnotationMethod: {
+ dex_ir::MethodId* method_id = data->GetMethodId();
+ fputs(method_id->Name()->Data(), out_file_);
+ break;
+ }
+ case DexFile::kDexAnnotationArray: {
+ fputc('{', out_file_);
+ // Display all elements.
+ for (auto& array : *data->GetAnnotationArray()) {
+ fputc(' ', out_file_);
+ DumpEncodedValue(array.get());
+ }
+ fputs(" }", out_file_);
+ break;
+ }
+ case DexFile::kDexAnnotationAnnotation: {
+ fputs(data->GetAnnotationAnnotationString()->Data(), out_file_);
+ // Display all name=value pairs.
+ for (auto& subannotation : *data->GetAnnotationAnnotationNameValuePairArray()) {
+ fputc(' ', out_file_);
+ fputs(subannotation->Name()->Data(), out_file_);
+ fputc('=', out_file_);
+ DumpEncodedValue(subannotation->Value());
+ }
+ break;
+ }
+ case DexFile::kDexAnnotationNull:
+ fputs("null", out_file_);
+ break;
+ case DexFile::kDexAnnotationBoolean:
+ fputs(StrBool(data->GetBoolean()), out_file_);
+ break;
+ default:
+ fputs("????", out_file_);
+ break;
+ } // switch
+}
+
+/*
+ * Dumps the file header.
+ */
+static void DumpFileHeader(const dex_ir::Header* header) {
+ char sanitized[8 * 2 + 1];
+ fprintf(out_file_, "DEX file header:\n");
+ Asciify(sanitized, header->Magic(), 8);
+ fprintf(out_file_, "magic : '%s'\n", sanitized);
+ fprintf(out_file_, "checksum : %08x\n", header->Checksum());
+ fprintf(out_file_, "signature : %02x%02x...%02x%02x\n",
+ header->Signature()[0], header->Signature()[1],
+ header->Signature()[DexFile::kSha1DigestSize - 2],
+ header->Signature()[DexFile::kSha1DigestSize - 1]);
+ fprintf(out_file_, "file_size : %d\n", header->FileSize());
+ fprintf(out_file_, "header_size : %d\n", header->HeaderSize());
+ fprintf(out_file_, "link_size : %d\n", header->LinkSize());
+ fprintf(out_file_, "link_off : %d (0x%06x)\n",
+ header->LinkOffset(), header->LinkOffset());
+ fprintf(out_file_, "string_ids_size : %d\n", header->StringIdsSize());
+ fprintf(out_file_, "string_ids_off : %d (0x%06x)\n",
+ header->StringIdsOffset(), header->StringIdsOffset());
+ fprintf(out_file_, "type_ids_size : %d\n", header->TypeIdsSize());
+ fprintf(out_file_, "type_ids_off : %d (0x%06x)\n",
+ header->TypeIdsOffset(), header->TypeIdsOffset());
+ fprintf(out_file_, "proto_ids_size : %d\n", header->ProtoIdsSize());
+ fprintf(out_file_, "proto_ids_off : %d (0x%06x)\n",
+ header->ProtoIdsOffset(), header->ProtoIdsOffset());
+ fprintf(out_file_, "field_ids_size : %d\n", header->FieldIdsSize());
+ fprintf(out_file_, "field_ids_off : %d (0x%06x)\n",
+ header->FieldIdsOffset(), header->FieldIdsOffset());
+ fprintf(out_file_, "method_ids_size : %d\n", header->MethodIdsSize());
+ fprintf(out_file_, "method_ids_off : %d (0x%06x)\n",
+ header->MethodIdsOffset(), header->MethodIdsOffset());
+ fprintf(out_file_, "class_defs_size : %d\n", header->ClassDefsSize());
+ fprintf(out_file_, "class_defs_off : %d (0x%06x)\n",
+ header->ClassDefsOffset(), header->ClassDefsOffset());
+ fprintf(out_file_, "data_size : %d\n", header->DataSize());
+ fprintf(out_file_, "data_off : %d (0x%06x)\n\n",
+ header->DataOffset(), header->DataOffset());
+}
+
+/*
+ * Dumps a class_def_item.
+ */
+static void DumpClassDef(dex_ir::Header* header, int idx) {
+ // General class information.
+ dex_ir::ClassDef* class_def = header->ClassDefs()[idx].get();
+ fprintf(out_file_, "Class #%d header:\n", idx);
+ fprintf(out_file_, "class_idx : %d\n", class_def->ClassType()->GetOffset());
+ fprintf(out_file_, "access_flags : %d (0x%04x)\n",
+ class_def->GetAccessFlags(), class_def->GetAccessFlags());
+ uint32_t superclass_idx = class_def->Superclass() == nullptr ?
+ DexFile::kDexNoIndex16 : class_def->Superclass()->GetOffset();
+ fprintf(out_file_, "superclass_idx : %d\n", superclass_idx);
+ fprintf(out_file_, "interfaces_off : %d (0x%06x)\n",
+ class_def->InterfacesOffset(), class_def->InterfacesOffset());
+ uint32_t source_file_offset = 0xffffffffU;
+ if (class_def->SourceFile() != nullptr) {
+ source_file_offset = class_def->SourceFile()->GetOffset();
+ }
+ fprintf(out_file_, "source_file_idx : %d\n", source_file_offset);
+ uint32_t annotations_offset = 0;
+ if (class_def->Annotations() != nullptr) {
+ annotations_offset = class_def->Annotations()->GetOffset();
+ }
+ fprintf(out_file_, "annotations_off : %d (0x%06x)\n",
+ annotations_offset, annotations_offset);
+ if (class_def->GetClassData() == nullptr) {
+ fprintf(out_file_, "class_data_off : %d (0x%06x)\n", 0, 0);
+ } else {
+ fprintf(out_file_, "class_data_off : %d (0x%06x)\n",
+ class_def->GetClassData()->GetOffset(), class_def->GetClassData()->GetOffset());
+ }
+
+ // Fields and methods.
+ dex_ir::ClassData* class_data = class_def->GetClassData();
+ if (class_data != nullptr && class_data->StaticFields() != nullptr) {
+ fprintf(out_file_, "static_fields_size : %zu\n", class_data->StaticFields()->size());
+ } else {
+ fprintf(out_file_, "static_fields_size : 0\n");
+ }
+ if (class_data != nullptr && class_data->InstanceFields() != nullptr) {
+ fprintf(out_file_, "instance_fields_size: %zu\n", class_data->InstanceFields()->size());
+ } else {
+ fprintf(out_file_, "instance_fields_size: 0\n");
+ }
+ if (class_data != nullptr && class_data->DirectMethods() != nullptr) {
+ fprintf(out_file_, "direct_methods_size : %zu\n", class_data->DirectMethods()->size());
+ } else {
+ fprintf(out_file_, "direct_methods_size : 0\n");
+ }
+ if (class_data != nullptr && class_data->VirtualMethods() != nullptr) {
+ fprintf(out_file_, "virtual_methods_size: %zu\n", class_data->VirtualMethods()->size());
+ } else {
+ fprintf(out_file_, "virtual_methods_size: 0\n");
+ }
+ fprintf(out_file_, "\n");
+}
+
+/**
+ * Dumps an annotation set item.
+ */
+static void DumpAnnotationSetItem(dex_ir::AnnotationSetItem* set_item) {
+ if (set_item == nullptr || set_item->GetItems()->size() == 0) {
+ fputs(" empty-annotation-set\n", out_file_);
+ return;
+ }
+ for (std::unique_ptr<dex_ir::AnnotationItem>& annotation : *set_item->GetItems()) {
+ if (annotation == nullptr) {
+ continue;
+ }
+ fputs(" ", out_file_);
+ switch (annotation->GetVisibility()) {
+ case DexFile::kDexVisibilityBuild: fputs("VISIBILITY_BUILD ", out_file_); break;
+ case DexFile::kDexVisibilityRuntime: fputs("VISIBILITY_RUNTIME ", out_file_); break;
+ case DexFile::kDexVisibilitySystem: fputs("VISIBILITY_SYSTEM ", out_file_); break;
+ default: fputs("VISIBILITY_UNKNOWN ", out_file_); break;
+ } // switch
+ // Decode raw bytes in annotation.
+ // const uint8_t* rData = annotation->annotation_;
+ dex_ir::ArrayItem* data = annotation->GetItem();
+ DumpEncodedValue(data);
+ fputc('\n', out_file_);
+ }
+}
+
+/*
+ * Dumps class annotations.
+ */
+static void DumpClassAnnotations(dex_ir::Header* header, int idx) {
+ dex_ir::ClassDef* class_def = header->ClassDefs()[idx].get();
+ dex_ir::AnnotationsDirectoryItem* annotations_directory = class_def->Annotations();
+ if (annotations_directory == nullptr) {
+ return; // none
+ }
+
+ fprintf(out_file_, "Class #%d annotations:\n", idx);
+
+ dex_ir::AnnotationSetItem* class_set_item = annotations_directory->GetClassAnnotation();
+ dex_ir::FieldAnnotationVector* fields = annotations_directory->GetFieldAnnotations();
+ dex_ir::MethodAnnotationVector* methods = annotations_directory->GetMethodAnnotations();
+ dex_ir::ParameterAnnotationVector* parameters = annotations_directory->GetParameterAnnotations();
+
+ // Annotations on the class itself.
+ if (class_set_item != nullptr) {
+ fprintf(out_file_, "Annotations on class\n");
+ DumpAnnotationSetItem(class_set_item);
+ }
+
+ // Annotations on fields.
+ if (fields != nullptr) {
+ for (auto& field : *fields) {
+ const dex_ir::FieldId* field_id = field->GetFieldId();
+ const uint32_t field_idx = field_id->GetOffset();
+ const char* field_name = field_id->Name()->Data();
+ fprintf(out_file_, "Annotations on field #%u '%s'\n", field_idx, field_name);
+ DumpAnnotationSetItem(field->GetAnnotationSetItem());
+ }
+ }
+
+ // Annotations on methods.
+ if (methods != nullptr) {
+ for (auto& method : *methods) {
+ const dex_ir::MethodId* method_id = method->GetMethodId();
+ const uint32_t method_idx = method_id->GetOffset();
+ const char* method_name = method_id->Name()->Data();
+ fprintf(out_file_, "Annotations on method #%u '%s'\n", method_idx, method_name);
+ DumpAnnotationSetItem(method->GetAnnotationSetItem());
+ }
+ }
+
+ // Annotations on method parameters.
+ if (parameters != nullptr) {
+ for (auto& parameter : *parameters) {
+ const dex_ir::MethodId* method_id = parameter->GetMethodId();
+ const uint32_t method_idx = method_id->GetOffset();
+ const char* method_name = method_id->Name()->Data();
+ fprintf(out_file_, "Annotations on method #%u '%s' parameters\n", method_idx, method_name);
+ uint32_t j = 0;
+ for (auto& annotation : *parameter->GetAnnotations()) {
+ fprintf(out_file_, "#%u\n", j);
+ DumpAnnotationSetItem(annotation.get());
+ ++j;
+ }
+ }
+ }
+
+ fputc('\n', out_file_);
+}
+
+/*
+ * Dumps an interface that a class declares to implement.
+ */
+static void DumpInterface(const dex_ir::TypeId* type_item, int i) {
+ const char* interface_name = type_item->GetStringId()->Data();
+ if (options_.output_format_ == kOutputPlain) {
+ fprintf(out_file_, " #%d : '%s'\n", i, interface_name);
+ } else {
+ std::string dot(DescriptorToDotWrapper(interface_name));
+ fprintf(out_file_, "<implements name=\"%s\">\n</implements>\n", dot.c_str());
+ }
+}
+
+/*
+ * Dumps the catches table associated with the code.
+ */
+static void DumpCatches(const dex_ir::CodeItem* code) {
+ const uint16_t tries_size = code->TriesSize();
+
+ // No catch table.
+ if (tries_size == 0) {
+ fprintf(out_file_, " catches : (none)\n");
+ return;
+ }
+
+ // Dump all table entries.
+ fprintf(out_file_, " catches : %d\n", tries_size);
+ std::vector<std::unique_ptr<const dex_ir::TryItem>>* tries = code->Tries();
+ for (uint32_t i = 0; i < tries_size; i++) {
+ const dex_ir::TryItem* try_item = (*tries)[i].get();
+ const uint32_t start = try_item->StartAddr();
+ const uint32_t end = start + try_item->InsnCount();
+ fprintf(out_file_, " 0x%04x - 0x%04x\n", start, end);
+ for (auto& handler : try_item->GetHandlers()) {
+ const dex_ir::TypeId* type_id = handler->GetTypeId();
+ const char* descriptor = (type_id == nullptr) ? "<any>" : type_id->GetStringId()->Data();
+ fprintf(out_file_, " %s -> 0x%04x\n", descriptor, handler->GetAddress());
+ } // for
+ } // for
+}
+
+/*
+ * Dumps all positions table entries associated with the code.
+ */
+static void DumpPositionInfo(const dex_ir::CodeItem* code) {
+ dex_ir::DebugInfoItem* debug_info = code->DebugInfo();
+ if (debug_info == nullptr) {
+ return;
+ }
+ std::vector<std::unique_ptr<dex_ir::PositionInfo>>& positions = debug_info->GetPositionInfo();
+ for (size_t i = 0; i < positions.size(); ++i) {
+ fprintf(out_file_, " 0x%04x line=%d\n", positions[i]->address_, positions[i]->line_);
+ }
+}
+
+/*
+ * Dumps all locals table entries associated with the code.
+ */
+static void DumpLocalInfo(const dex_ir::CodeItem* code) {
+ dex_ir::DebugInfoItem* debug_info = code->DebugInfo();
+ if (debug_info == nullptr) {
+ return;
+ }
+ std::vector<std::unique_ptr<dex_ir::LocalInfo>>& locals = debug_info->GetLocalInfo();
+ for (size_t i = 0; i < locals.size(); ++i) {
+ dex_ir::LocalInfo* entry = locals[i].get();
+ fprintf(out_file_, " 0x%04x - 0x%04x reg=%d %s %s %s\n",
+ entry->start_address_, entry->end_address_, entry->reg_,
+ entry->name_.c_str(), entry->descriptor_.c_str(), entry->signature_.c_str());
+ }
+}
+
+/*
+ * Helper for dumpInstruction(), which builds the string
+ * representation for the index in the given instruction.
+ * Returns a pointer to a buffer of sufficient size.
+ */
+static std::unique_ptr<char[]> IndexString(dex_ir::Header* header,
+ const Instruction* dec_insn,
+ size_t buf_size) {
+ std::unique_ptr<char[]> buf(new char[buf_size]);
+ // Determine index and width of the string.
+ uint32_t index = 0;
+ uint32_t width = 4;
+ switch (Instruction::FormatOf(dec_insn->Opcode())) {
+ // SOME NOT SUPPORTED:
+ // case Instruction::k20bc:
+ case Instruction::k21c:
+ case Instruction::k35c:
+ // case Instruction::k35ms:
+ case Instruction::k3rc:
+ // case Instruction::k3rms:
+ // case Instruction::k35mi:
+ // case Instruction::k3rmi:
+ index = dec_insn->VRegB();
+ width = 4;
+ break;
+ case Instruction::k31c:
+ index = dec_insn->VRegB();
+ width = 8;
+ break;
+ case Instruction::k22c:
+ // case Instruction::k22cs:
+ index = dec_insn->VRegC();
+ width = 4;
+ break;
+ default:
+ break;
+ } // switch
+
+ // Determine index type.
+ size_t outSize = 0;
+ switch (Instruction::IndexTypeOf(dec_insn->Opcode())) {
+ case Instruction::kIndexUnknown:
+ // This function should never get called for this type, but do
+ // something sensible here, just to help with debugging.
+ outSize = snprintf(buf.get(), buf_size, "<unknown-index>");
+ break;
+ case Instruction::kIndexNone:
+ // This function should never get called for this type, but do
+ // something sensible here, just to help with debugging.
+ outSize = snprintf(buf.get(), buf_size, "<no-index>");
+ break;
+ case Instruction::kIndexTypeRef:
+ if (index < header->TypeIdsSize()) {
+ const char* tp = header->TypeIds()[index]->GetStringId()->Data();
+ outSize = snprintf(buf.get(), buf_size, "%s // type@%0*x", tp, width, index);
+ } else {
+ outSize = snprintf(buf.get(), buf_size, "<type?> // type@%0*x", width, index);
+ }
+ break;
+ case Instruction::kIndexStringRef:
+ if (index < header->StringIdsSize()) {
+ const char* st = header->StringIds()[index]->Data();
+ outSize = snprintf(buf.get(), buf_size, "\"%s\" // string@%0*x", st, width, index);
+ } else {
+ outSize = snprintf(buf.get(), buf_size, "<string?> // string@%0*x", width, index);
+ }
+ break;
+ case Instruction::kIndexMethodRef:
+ if (index < header->MethodIdsSize()) {
+ dex_ir::MethodId* method_id = header->MethodIds()[index].get();
+ const char* name = method_id->Name()->Data();
+ std::string type_descriptor = GetSignatureForProtoId(method_id->Proto());
+ const char* back_descriptor = method_id->Class()->GetStringId()->Data();
+ outSize = snprintf(buf.get(), buf_size, "%s.%s:%s // method@%0*x",
+ back_descriptor, name, type_descriptor.c_str(), width, index);
+ } else {
+ outSize = snprintf(buf.get(), buf_size, "<method?> // method@%0*x", width, index);
+ }
+ break;
+ case Instruction::kIndexFieldRef:
+ if (index < header->FieldIdsSize()) {
+ dex_ir::FieldId* field_id = header->FieldIds()[index].get();
+ const char* name = field_id->Name()->Data();
+ const char* type_descriptor = field_id->Type()->GetStringId()->Data();
+ const char* back_descriptor = field_id->Class()->GetStringId()->Data();
+ outSize = snprintf(buf.get(), buf_size, "%s.%s:%s // field@%0*x",
+ back_descriptor, name, type_descriptor, width, index);
+ } else {
+ outSize = snprintf(buf.get(), buf_size, "<field?> // field@%0*x", width, index);
+ }
+ break;
+ case Instruction::kIndexVtableOffset:
+ outSize = snprintf(buf.get(), buf_size, "[%0*x] // vtable #%0*x",
+ width, index, width, index);
+ break;
+ case Instruction::kIndexFieldOffset:
+ outSize = snprintf(buf.get(), buf_size, "[obj+%0*x]", width, index);
+ break;
+ // SOME NOT SUPPORTED:
+ // case Instruction::kIndexVaries:
+ // case Instruction::kIndexInlineMethod:
+ default:
+ outSize = snprintf(buf.get(), buf_size, "<?>");
+ break;
+ } // switch
+
+ // Determine success of string construction.
+ if (outSize >= buf_size) {
+ // The buffer wasn't big enough; retry with computed size. Note: snprintf()
+ // doesn't count the '\0' as part of its returned size, so we add explicit
+ // space for it here.
+ return IndexString(header, dec_insn, outSize + 1);
+ }
+ return buf;
+}
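+
+// As a concrete (hypothetical) example: a const-string instruction whose
+// index resolves to the string "hello" at string_id 3 is rendered by the
+// kIndexStringRef branch above as
+//   "hello" // string@0003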
+
+/*
+ * Dumps a single instruction.
+ */
+static void DumpInstruction(dex_ir::Header* header, const dex_ir::CodeItem* code,
+ uint32_t code_offset, uint32_t insn_idx, uint32_t insn_width,
+ const Instruction* dec_insn) {
+ // Address of instruction (expressed as byte offset).
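+ // Note: the 0x10 skips the 16-byte code_item header (registers/ins/outs/
+ // tries counts, debug_info_off and insns_size) that precedes the insns
+ // array, and each code unit is two bytes, hence insn_idx * 2.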
+ fprintf(out_file_, "%06x:", code_offset + 0x10 + insn_idx * 2);
+
+ // Dump (part of) raw bytes.
+ const uint16_t* insns = code->Insns();
+ for (uint32_t i = 0; i < 8; i++) {
+ if (i < insn_width) {
+ if (i == 7) {
+ fprintf(out_file_, " ... ");
+ } else {
+ // Print 16-bit value in little-endian order.
+ const uint8_t* bytePtr = (const uint8_t*) &insns[insn_idx + i];
+ fprintf(out_file_, " %02x%02x", bytePtr[0], bytePtr[1]);
+ }
+ } else {
+ fputs(" ", out_file_);
+ }
+ } // for
+
+ // Dump pseudo-instruction or opcode.
+ if (dec_insn->Opcode() == Instruction::NOP) {
+ const uint16_t instr = Get2LE((const uint8_t*) &insns[insn_idx]);
+ if (instr == Instruction::kPackedSwitchSignature) {
+ fprintf(out_file_, "|%04x: packed-switch-data (%d units)", insn_idx, insn_width);
+ } else if (instr == Instruction::kSparseSwitchSignature) {
+ fprintf(out_file_, "|%04x: sparse-switch-data (%d units)", insn_idx, insn_width);
+ } else if (instr == Instruction::kArrayDataSignature) {
+ fprintf(out_file_, "|%04x: array-data (%d units)", insn_idx, insn_width);
+ } else {
+ fprintf(out_file_, "|%04x: nop // spacer", insn_idx);
+ }
+ } else {
+ fprintf(out_file_, "|%04x: %s", insn_idx, dec_insn->Name());
+ }
+
+ // Set up additional argument.
+ std::unique_ptr<char[]> index_buf;
+ if (Instruction::IndexTypeOf(dec_insn->Opcode()) != Instruction::kIndexNone) {
+ index_buf = IndexString(header, dec_insn, 200);
+ }
+
+ // Dump the instruction.
+ //
+ // NOTE: pDecInsn->DumpString(pDexFile) differs too much from original.
+ //
+ switch (Instruction::FormatOf(dec_insn->Opcode())) {
+ case Instruction::k10x: // op
+ break;
+ case Instruction::k12x: // op vA, vB
+ fprintf(out_file_, " v%d, v%d", dec_insn->VRegA(), dec_insn->VRegB());
+ break;
+ case Instruction::k11n: // op vA, #+B
+ fprintf(out_file_, " v%d, #int %d // #%x",
+ dec_insn->VRegA(), (int32_t) dec_insn->VRegB(), (uint8_t)dec_insn->VRegB());
+ break;
+ case Instruction::k11x: // op vAA
+ fprintf(out_file_, " v%d", dec_insn->VRegA());
+ break;
+ case Instruction::k10t: // op +AA
+ case Instruction::k20t: { // op +AAAA
+ const int32_t targ = (int32_t) dec_insn->VRegA();
+ fprintf(out_file_, " %04x // %c%04x",
+ insn_idx + targ,
+ (targ < 0) ? '-' : '+',
+ (targ < 0) ? -targ : targ);
+ break;
+ }
+ case Instruction::k22x: // op vAA, vBBBB
+ fprintf(out_file_, " v%d, v%d", dec_insn->VRegA(), dec_insn->VRegB());
+ break;
+ case Instruction::k21t: { // op vAA, +BBBB
+ const int32_t targ = (int32_t) dec_insn->VRegB();
+ fprintf(out_file_, " v%d, %04x // %c%04x", dec_insn->VRegA(),
+ insn_idx + targ,
+ (targ < 0) ? '-' : '+',
+ (targ < 0) ? -targ : targ);
+ break;
+ }
+ case Instruction::k21s: // op vAA, #+BBBB
+ fprintf(out_file_, " v%d, #int %d // #%x",
+ dec_insn->VRegA(), (int32_t) dec_insn->VRegB(), (uint16_t)dec_insn->VRegB());
+ break;
+ case Instruction::k21h: // op vAA, #+BBBB0000[00000000]
+ // The printed format varies a bit based on the actual opcode.
+ if (dec_insn->Opcode() == Instruction::CONST_HIGH16) {
+ const int32_t value = dec_insn->VRegB() << 16;
+ fprintf(out_file_, " v%d, #int %d // #%x",
+ dec_insn->VRegA(), value, (uint16_t) dec_insn->VRegB());
+ } else {
+ const int64_t value = ((int64_t) dec_insn->VRegB()) << 48;
+ fprintf(out_file_, " v%d, #long %" PRId64 " // #%x",
+ dec_insn->VRegA(), value, (uint16_t) dec_insn->VRegB());
+ }
+ break;
+ case Instruction::k21c: // op vAA, thing@BBBB
+ case Instruction::k31c: // op vAA, thing@BBBBBBBB
+ fprintf(out_file_, " v%d, %s", dec_insn->VRegA(), index_buf.get());
+ break;
+ case Instruction::k23x: // op vAA, vBB, vCC
+ fprintf(out_file_, " v%d, v%d, v%d",
+ dec_insn->VRegA(), dec_insn->VRegB(), dec_insn->VRegC());
+ break;
+ case Instruction::k22b: // op vAA, vBB, #+CC
+ fprintf(out_file_, " v%d, v%d, #int %d // #%02x",
+ dec_insn->VRegA(), dec_insn->VRegB(),
+ (int32_t) dec_insn->VRegC(), (uint8_t) dec_insn->VRegC());
+ break;
+ case Instruction::k22t: { // op vA, vB, +CCCC
+ const int32_t targ = (int32_t) dec_insn->VRegC();
+ fprintf(out_file_, " v%d, v%d, %04x // %c%04x",
+ dec_insn->VRegA(), dec_insn->VRegB(),
+ insn_idx + targ,
+ (targ < 0) ? '-' : '+',
+ (targ < 0) ? -targ : targ);
+ break;
+ }
+ case Instruction::k22s: // op vA, vB, #+CCCC
+ fprintf(out_file_, " v%d, v%d, #int %d // #%04x",
+ dec_insn->VRegA(), dec_insn->VRegB(),
+ (int32_t) dec_insn->VRegC(), (uint16_t) dec_insn->VRegC());
+ break;
+ case Instruction::k22c: // op vA, vB, thing@CCCC
+ // NOT SUPPORTED:
+ // case Instruction::k22cs: // [opt] op vA, vB, field offset CCCC
+ fprintf(out_file_, " v%d, v%d, %s",
+ dec_insn->VRegA(), dec_insn->VRegB(), index_buf.get());
+ break;
+ case Instruction::k30t:
+ fprintf(out_file_, " #%08x", dec_insn->VRegA());
+ break;
+ case Instruction::k31i: { // op vAA, #+BBBBBBBB
+ // This is often, but not always, a float.
+ union {
+ float f;
+ uint32_t i;
+ } conv;
+ conv.i = dec_insn->VRegB();
+ fprintf(out_file_, " v%d, #float %g // #%08x",
+ dec_insn->VRegA(), conv.f, dec_insn->VRegB());
+ break;
+ }
+ case Instruction::k31t: // op vAA, offset +BBBBBBBB
+ fprintf(out_file_, " v%d, %08x // +%08x",
+ dec_insn->VRegA(), insn_idx + dec_insn->VRegB(), dec_insn->VRegB());
+ break;
+ case Instruction::k32x: // op vAAAA, vBBBB
+ fprintf(out_file_, " v%d, v%d", dec_insn->VRegA(), dec_insn->VRegB());
+ break;
+ case Instruction::k35c: { // op {vC, vD, vE, vF, vG}, thing@BBBB
+ // NOT SUPPORTED:
+ // case Instruction::k35ms: // [opt] invoke-virtual+super
+ // case Instruction::k35mi: // [opt] inline invoke
+ uint32_t arg[Instruction::kMaxVarArgRegs];
+ dec_insn->GetVarArgs(arg);
+ fputs(" {", out_file_);
+ for (int i = 0, n = dec_insn->VRegA(); i < n; i++) {
+ if (i == 0) {
+ fprintf(out_file_, "v%d", arg[i]);
+ } else {
+ fprintf(out_file_, ", v%d", arg[i]);
+ }
+ } // for
+ fprintf(out_file_, "}, %s", index_buf.get());
+ break;
+ }
+ case Instruction::k3rc: // op {vCCCC .. v(CCCC+AA-1)}, thing@BBBB
+ // NOT SUPPORTED:
+ // case Instruction::k3rms: // [opt] invoke-virtual+super/range
+ // case Instruction::k3rmi: // [opt] execute-inline/range
+ {
+ // This doesn't match the "dx" output when some of the args are
+ // 64-bit values -- dx only shows the first register.
+ fputs(" {", out_file_);
+ for (int i = 0, n = dec_insn->VRegA(); i < n; i++) {
+ if (i == 0) {
+ fprintf(out_file_, "v%d", dec_insn->VRegC() + i);
+ } else {
+ fprintf(out_file_, ", v%d", dec_insn->VRegC() + i);
+ }
+ } // for
+ fprintf(out_file_, "}, %s", index_buf.get());
+ }
+ break;
+ case Instruction::k51l: { // op vAA, #+BBBBBBBBBBBBBBBB
+ // This is often, but not always, a double.
+ union {
+ double d;
+ uint64_t j;
+ } conv;
+ conv.j = dec_insn->WideVRegB();
+ fprintf(out_file_, " v%d, #double %g // #%016" PRIx64,
+ dec_insn->VRegA(), conv.d, dec_insn->WideVRegB());
+ break;
+ }
+ // NOT SUPPORTED:
+ // case Instruction::k00x: // unknown op or breakpoint
+ // break;
+ default:
+ fprintf(out_file_, " ???");
+ break;
+ } // switch
+
+ fputc('\n', out_file_);
+}
+
+/*
+ * Dumps a bytecode disassembly.
+ */
+static void DumpBytecodes(dex_ir::Header* header, uint32_t idx,
+ const dex_ir::CodeItem* code, uint32_t code_offset) {
+ dex_ir::MethodId* method_id = header->MethodIds()[idx].get();
+ const char* name = method_id->Name()->Data();
+ std::string type_descriptor = GetSignatureForProtoId(method_id->Proto());
+ const char* back_descriptor = method_id->Class()->GetStringId()->Data();
+
+ // Generate header.
+ std::string dot(DescriptorToDotWrapper(back_descriptor));
+ fprintf(out_file_, "%06x: |[%06x] %s.%s:%s\n",
+ code_offset, code_offset, dot.c_str(), name, type_descriptor.c_str());
+
+ // Iterate over all instructions.
+ const uint16_t* insns = code->Insns();
+ for (uint32_t insn_idx = 0; insn_idx < code->InsnsSize();) {
+ const Instruction* instruction = Instruction::At(&insns[insn_idx]);
+ const uint32_t insn_width = instruction->SizeInCodeUnits();
+ if (insn_width == 0) {
+ fprintf(stderr, "GLITCH: zero-width instruction at idx=0x%04x\n", insn_idx);
+ break;
+ }
+ DumpInstruction(header, code, code_offset, insn_idx, insn_width, instruction);
+ insn_idx += insn_width;
+ } // for
+}
+
+/*
+ * Dumps code of a method.
+ */
+static void DumpCode(dex_ir::Header* header, uint32_t idx, const dex_ir::CodeItem* code,
+ uint32_t code_offset) {
+ fprintf(out_file_, " registers : %d\n", code->RegistersSize());
+ fprintf(out_file_, " ins : %d\n", code->InsSize());
+ fprintf(out_file_, " outs : %d\n", code->OutsSize());
+ fprintf(out_file_, " insns size : %d 16-bit code units\n",
+ code->InsnsSize());
+
+ // Bytecode disassembly, if requested.
+ if (options_.disassemble_) {
+ DumpBytecodes(header, idx, code, code_offset);
+ }
+
+ // Try-catch blocks.
+ DumpCatches(code);
+
+ // Positions and locals table in the debug info.
+ fprintf(out_file_, " positions : \n");
+ DumpPositionInfo(code);
+ fprintf(out_file_, " locals : \n");
+ DumpLocalInfo(code);
+}
+
+/*
+ * Dumps a method.
+ */
+static void DumpMethod(dex_ir::Header* header, uint32_t idx, uint32_t flags,
+ const dex_ir::CodeItem* code, int i) {
+ // Bail for anything private if export only requested.
+ if (options_.exports_only_ && (flags & (kAccPublic | kAccProtected)) == 0) {
+ return;
+ }
+
+ dex_ir::MethodId* method_id = header->MethodIds()[idx].get();
+ const char* name = method_id->Name()->Data();
+ char* type_descriptor = strdup(GetSignatureForProtoId(method_id->Proto()).c_str());
+ const char* back_descriptor = method_id->Class()->GetStringId()->Data();
+ char* access_str = CreateAccessFlagStr(flags, kAccessForMethod);
+
+ if (options_.output_format_ == kOutputPlain) {
+ fprintf(out_file_, " #%d : (in %s)\n", i, back_descriptor);
+ fprintf(out_file_, " name : '%s'\n", name);
+ fprintf(out_file_, " type : '%s'\n", type_descriptor);
+ fprintf(out_file_, " access : 0x%04x (%s)\n", flags, access_str);
+ if (code == nullptr) {
+ fprintf(out_file_, " code : (none)\n");
+ } else {
+ fprintf(out_file_, " code -\n");
+ DumpCode(header, idx, code, code->GetOffset());
+ }
+ if (options_.disassemble_) {
+ fputc('\n', out_file_);
+ }
+ } else if (options_.output_format_ == kOutputXml) {
+ const bool constructor = (name[0] == '<');
+
+ // Method name and prototype.
+ if (constructor) {
+ std::string dot(DescriptorClassToDot(back_descriptor));
+ fprintf(out_file_, "<constructor name=\"%s\"\n", dot.c_str());
+ dot = DescriptorToDotWrapper(back_descriptor);
+ fprintf(out_file_, " type=\"%s\"\n", dot.c_str());
+ } else {
+ fprintf(out_file_, "<method name=\"%s\"\n", name);
+ const char* return_type = strrchr(type_descriptor, ')');
+ if (return_type == nullptr) {
+ fprintf(stderr, "bad method type descriptor '%s'\n", type_descriptor);
+ goto bail;
+ }
+ std::string dot(DescriptorToDotWrapper(return_type + 1));
+ fprintf(out_file_, " return=\"%s\"\n", dot.c_str());
+ fprintf(out_file_, " abstract=%s\n", QuotedBool((flags & kAccAbstract) != 0));
+ fprintf(out_file_, " native=%s\n", QuotedBool((flags & kAccNative) != 0));
+ fprintf(out_file_, " synchronized=%s\n", QuotedBool(
+ (flags & (kAccSynchronized | kAccDeclaredSynchronized)) != 0));
+ }
+
+ // Additional method flags.
+ fprintf(out_file_, " static=%s\n", QuotedBool((flags & kAccStatic) != 0));
+ fprintf(out_file_, " final=%s\n", QuotedBool((flags & kAccFinal) != 0));
+ // The "deprecated=" not knowable w/o parsing annotations.
+ fprintf(out_file_, " visibility=%s\n>\n", QuotedVisibility(flags));
+
+ // Parameters.
+ if (type_descriptor[0] != '(') {
+ fprintf(stderr, "ERROR: bad descriptor '%s'\n", type_descriptor);
+ goto bail;
+ }
+ char* tmp_buf = reinterpret_cast<char*>(malloc(strlen(type_descriptor) + 1));
+ const char* base = type_descriptor + 1;
+ int arg_num = 0;
+ while (*base != ')') {
+ char* cp = tmp_buf;
+ while (*base == '[') {
+ *cp++ = *base++;
+ }
+ if (*base == 'L') {
+ // Copy through ';'.
+ do {
+ *cp = *base++;
+ } while (*cp++ != ';');
+ } else {
+ // Primitive char, copy it.
+ if (strchr("ZBCSIFJD", *base) == nullptr) {
+ fprintf(stderr, "ERROR: bad method signature '%s'\n", base);
+ break; // while
+ }
+ *cp++ = *base++;
+ }
+ // Null terminate and display.
+ *cp++ = '\0';
+ std::string dot(DescriptorToDotWrapper(tmp_buf));
+ fprintf(out_file_, "<parameter name=\"arg%d\" type=\"%s\">\n"
+ "</parameter>\n", arg_num++, dot.c_str());
+ } // while
+ free(tmp_buf);
+ if (constructor) {
+ fprintf(out_file_, "</constructor>\n");
+ } else {
+ fprintf(out_file_, "</method>\n");
+ }
+ }
+
+ bail:
+ free(type_descriptor);
+ free(access_str);
+}
+
+/*
+ * Dumps a static (class) field.
+ */
+static void DumpSField(dex_ir::Header* header, uint32_t idx, uint32_t flags,
+ int i, dex_ir::ArrayItem* init) {
+ // Bail for anything private if export only requested.
+ if (options_.exports_only_ && (flags & (kAccPublic | kAccProtected)) == 0) {
+ return;
+ }
+
+ dex_ir::FieldId* field_id = header->FieldIds()[idx].get();
+ const char* name = field_id->Name()->Data();
+ const char* type_descriptor = field_id->Type()->GetStringId()->Data();
+ const char* back_descriptor = field_id->Class()->GetStringId()->Data();
+ char* access_str = CreateAccessFlagStr(flags, kAccessForField);
+
+ if (options_.output_format_ == kOutputPlain) {
+ fprintf(out_file_, " #%d : (in %s)\n", i, back_descriptor);
+ fprintf(out_file_, " name : '%s'\n", name);
+ fprintf(out_file_, " type : '%s'\n", type_descriptor);
+ fprintf(out_file_, " access : 0x%04x (%s)\n", flags, access_str);
+ if (init != nullptr) {
+ fputs(" value : ", out_file_);
+ DumpEncodedValue(init);
+ fputs("\n", out_file_);
+ }
+ } else if (options_.output_format_ == kOutputXml) {
+ fprintf(out_file_, "<field name=\"%s\"\n", name);
+ std::string dot(DescriptorToDotWrapper(type_descriptor));
+ fprintf(out_file_, " type=\"%s\"\n", dot.c_str());
+ fprintf(out_file_, " transient=%s\n", QuotedBool((flags & kAccTransient) != 0));
+ fprintf(out_file_, " volatile=%s\n", QuotedBool((flags & kAccVolatile) != 0));
+ // The "value=" is not knowable w/o parsing annotations.
+ fprintf(out_file_, " static=%s\n", QuotedBool((flags & kAccStatic) != 0));
+ fprintf(out_file_, " final=%s\n", QuotedBool((flags & kAccFinal) != 0));
+ // The "deprecated=" is not knowable w/o parsing annotations.
+ fprintf(out_file_, " visibility=%s\n", QuotedVisibility(flags));
+ if (init != nullptr) {
+ fputs(" value=\"", out_file_);
+ DumpEncodedValue(init);
+ fputs("\"\n", out_file_);
+ }
+ fputs(">\n</field>\n", out_file_);
+ }
+
+ free(access_str);
+}
+
+/*
+ * Dumps an instance field.
+ */
+static void DumpIField(dex_ir::Header* header, uint32_t idx, uint32_t flags, int i) {
+ DumpSField(header, idx, flags, i, nullptr);
+}
+
+/*
+ * Dumping a CFG. Note that this will do duplicate work. utils.h doesn't expose the code-item
+ * version, so the DumpMethodCFG code will have to iterate again to find it. But dexdump is a
+ * tool, so this is not performance-critical.
+ */
+
+static void DumpCFG(const DexFile* dex_file,
+ uint32_t dex_method_idx,
+ const DexFile::CodeItem* code) {
+ if (code != nullptr) {
+ std::ostringstream oss;
+ DumpMethodCFG(dex_file, dex_method_idx, oss);
+ fprintf(out_file_, "%s", oss.str().c_str());
+ }
+}
+
+static void DumpCFG(const DexFile* dex_file, int idx) {
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(idx);
+ const uint8_t* class_data = dex_file->GetClassData(class_def);
+ if (class_data == nullptr) { // empty class such as a marker interface?
+ return;
+ }
+ ClassDataItemIterator it(*dex_file, class_data);
+ while (it.HasNextStaticField()) {
+ it.Next();
+ }
+ while (it.HasNextInstanceField()) {
+ it.Next();
+ }
+ while (it.HasNextDirectMethod()) {
+ DumpCFG(dex_file,
+ it.GetMemberIndex(),
+ it.GetMethodCodeItem());
+ it.Next();
+ }
+ while (it.HasNextVirtualMethod()) {
+ DumpCFG(dex_file,
+ it.GetMemberIndex(),
+ it.GetMethodCodeItem());
+ it.Next();
+ }
+}
+
+/*
+ * Dumps the class.
+ *
+ * Note "idx" is a DexClassDef index, not a DexTypeId index.
+ *
+ * If "*last_package" is nullptr or does not match the current class' package,
+ * the value will be replaced with a newly-allocated string.
+ */
+static void DumpClass(const DexFile* dex_file,
+ dex_ir::Header* header,
+ int idx,
+ char** last_package) {
+ dex_ir::ClassDef* class_def = header->ClassDefs()[idx].get();
+ // Omitting non-public class.
+ if (options_.exports_only_ && (class_def->GetAccessFlags() & kAccPublic) == 0) {
+ return;
+ }
+
+ if (options_.show_section_headers_) {
+ DumpClassDef(header, idx);
+ }
+
+ if (options_.show_annotations_) {
+ DumpClassAnnotations(header, idx);
+ }
+
+ if (options_.show_cfg_) {
+ DumpCFG(dex_file, idx);
+ return;
+ }
+
+ // For the XML output, show the package name. Ideally we'd gather
+ // up the classes, sort them, and dump them alphabetically so the
+ // package name wouldn't jump around, but that's not a great plan
+ // for something that needs to run on the device.
+ const char* class_descriptor = header->ClassDefs()[idx]->ClassType()->GetStringId()->Data();
+ if (!(class_descriptor[0] == 'L' &&
+ class_descriptor[strlen(class_descriptor)-1] == ';')) {
+ // Arrays and primitives should not be defined explicitly. Keep going?
+ fprintf(stderr, "Malformed class name '%s'\n", class_descriptor);
+ } else if (options_.output_format_ == kOutputXml) {
+ char* mangle = strdup(class_descriptor + 1);
+ mangle[strlen(mangle)-1] = '\0';
+
+ // Reduce to just the package name.
+ char* last_slash = strrchr(mangle, '/');
+ if (last_slash != nullptr) {
+ *last_slash = '\0';
+ } else {
+ *mangle = '\0';
+ }
+
+ for (char* cp = mangle; *cp != '\0'; cp++) {
+ if (*cp == '/') {
+ *cp = '.';
+ }
+ } // for
+
+ if (*last_package == nullptr || strcmp(mangle, *last_package) != 0) {
+ // Start of a new package.
+ if (*last_package != nullptr) {
+ fprintf(out_file_, "</package>\n");
+ }
+ fprintf(out_file_, "<package name=\"%s\"\n>\n", mangle);
+ free(*last_package);
+ *last_package = mangle;
+ } else {
+ free(mangle);
+ }
+ }
+
+ // General class information.
+ char* access_str = CreateAccessFlagStr(class_def->GetAccessFlags(), kAccessForClass);
+ const char* superclass_descriptor = nullptr;
+ if (class_def->Superclass() != nullptr) {
+ superclass_descriptor = class_def->Superclass()->GetStringId()->Data();
+ }
+ if (options_.output_format_ == kOutputPlain) {
+ fprintf(out_file_, "Class #%d -\n", idx);
+ fprintf(out_file_, " Class descriptor : '%s'\n", class_descriptor);
+ fprintf(out_file_, " Access flags : 0x%04x (%s)\n",
+ class_def->GetAccessFlags(), access_str);
+ if (superclass_descriptor != nullptr) {
+ fprintf(out_file_, " Superclass : '%s'\n", superclass_descriptor);
+ }
+ fprintf(out_file_, " Interfaces -\n");
+ } else {
+ std::string dot(DescriptorClassToDot(class_descriptor));
+ fprintf(out_file_, "<class name=\"%s\"\n", dot.c_str());
+ if (superclass_descriptor != nullptr) {
+ dot = DescriptorToDotWrapper(superclass_descriptor);
+ fprintf(out_file_, " extends=\"%s\"\n", dot.c_str());
+ }
+ fprintf(out_file_, " interface=%s\n",
+ QuotedBool((class_def->GetAccessFlags() & kAccInterface) != 0));
+ fprintf(out_file_, " abstract=%s\n",
+ QuotedBool((class_def->GetAccessFlags() & kAccAbstract) != 0));
+ fprintf(out_file_, " static=%s\n", QuotedBool((class_def->GetAccessFlags() & kAccStatic) != 0));
+ fprintf(out_file_, " final=%s\n", QuotedBool((class_def->GetAccessFlags() & kAccFinal) != 0));
+ // The "deprecated=" not knowable w/o parsing annotations.
+ fprintf(out_file_, " visibility=%s\n", QuotedVisibility(class_def->GetAccessFlags()));
+ fprintf(out_file_, ">\n");
+ }
+
+ // Interfaces.
+ dex_ir::TypeIdVector* interfaces = class_def->Interfaces();
+ if (interfaces != nullptr) {
+ for (uint32_t i = 0; i < interfaces->size(); i++) {
+ DumpInterface((*interfaces)[i], i);
+ } // for
+ }
+
+ // Fields and methods.
+ dex_ir::ClassData* class_data = class_def->GetClassData();
+ // Prepare data for static fields.
+ std::vector<std::unique_ptr<dex_ir::ArrayItem>>* static_values = class_def->StaticValues();
+ const uint32_t static_values_size = (static_values == nullptr) ? 0 : static_values->size();
+
+ // Static fields.
+ if (options_.output_format_ == kOutputPlain) {
+ fprintf(out_file_, " Static fields -\n");
+ }
+ if (class_data != nullptr) {
+ dex_ir::FieldItemVector* static_fields = class_data->StaticFields();
+ if (static_fields != nullptr) {
+ for (uint32_t i = 0; i < static_fields->size(); i++) {
+ DumpSField(header,
+ (*static_fields)[i]->GetFieldId()->GetOffset(),
+ (*static_fields)[i]->GetAccessFlags(),
+ i,
+ i < static_values_size ? (*static_values)[i].get() : nullptr);
+ } // for
+ }
+ }
+
+ // Instance fields.
+ if (options_.output_format_ == kOutputPlain) {
+ fprintf(out_file_, " Instance fields -\n");
+ }
+ if (class_data != nullptr) {
+ dex_ir::FieldItemVector* instance_fields = class_data->InstanceFields();
+ if (instance_fields != nullptr) {
+ for (uint32_t i = 0; i < instance_fields->size(); i++) {
+ DumpIField(header,
+ (*instance_fields)[i]->GetFieldId()->GetOffset(),
+ (*instance_fields)[i]->GetAccessFlags(),
+ i);
+ } // for
+ }
+ }
+
+ // Direct methods.
+ if (options_.output_format_ == kOutputPlain) {
+ fprintf(out_file_, " Direct methods -\n");
+ }
+ if (class_data != nullptr) {
+ dex_ir::MethodItemVector* direct_methods = class_data->DirectMethods();
+ if (direct_methods != nullptr) {
+ for (uint32_t i = 0; i < direct_methods->size(); i++) {
+ DumpMethod(header,
+ (*direct_methods)[i]->GetMethodId()->GetOffset(),
+ (*direct_methods)[i]->GetAccessFlags(),
+ (*direct_methods)[i]->GetCodeItem(),
+ i);
+ } // for
+ }
+ }
+
+ // Virtual methods.
+ if (options_.output_format_ == kOutputPlain) {
+ fprintf(out_file_, " Virtual methods -\n");
+ }
+ if (class_data != nullptr) {
+ dex_ir::MethodItemVector* virtual_methods = class_data->VirtualMethods();
+ if (virtual_methods != nullptr) {
+ for (uint32_t i = 0; i < virtual_methods->size(); i++) {
+ DumpMethod(header,
+ (*virtual_methods)[i]->GetMethodId()->GetOffset(),
+ (*virtual_methods)[i]->GetAccessFlags(),
+ (*virtual_methods)[i]->GetCodeItem(),
+ i);
+ } // for
+ }
+ }
+
+ // End of class.
+ if (options_.output_format_ == kOutputPlain) {
+ const char* file_name = "unknown";
+ if (class_def->SourceFile() != nullptr) {
+ file_name = class_def->SourceFile()->Data();
+ }
+ const dex_ir::StringId* source_file = class_def->SourceFile();
+ fprintf(out_file_, " source_file_idx : %d (%s)\n\n",
+ source_file == nullptr ? 0xffffffffU : source_file->GetOffset(), file_name);
+ } else if (options_.output_format_ == kOutputXml) {
+ fprintf(out_file_, "</class>\n");
+ }
+
+ free(access_str);
+}
+
+/*
+ * Dumps the requested sections of the file.
+ */
+static void ProcessDexFile(const char* file_name, const DexFile* dex_file) {
+ if (options_.verbose_) {
+ fprintf(out_file_, "Opened '%s', DEX version '%.3s'\n",
+ file_name, dex_file->GetHeader().magic_ + 4);
+ }
+ std::unique_ptr<dex_ir::Header> header(dex_ir::DexIrBuilder(*dex_file));
+
+ // Headers.
+ if (options_.show_file_headers_) {
+ DumpFileHeader(header.get());
+ }
+
+ // Open XML context.
+ if (options_.output_format_ == kOutputXml) {
+ fprintf(out_file_, "<api>\n");
+ }
+
+ // Iterate over all classes.
+ char* package = nullptr;
+ const uint32_t class_defs_size = header->ClassDefsSize();
+ for (uint32_t i = 0; i < class_defs_size; i++) {
+ DumpClass(dex_file, header.get(), i, &package);
+ } // for
+
+ // Free the last package allocated.
+ if (package != nullptr) {
+ fprintf(out_file_, "</package>\n");
+ free(package);
+ }
+
+ // Close XML context.
+ if (options_.output_format_ == kOutputXml) {
+ fprintf(out_file_, "</api>\n");
+ }
+}
+
+/*
+ * Processes a single file (either direct .dex or indirect .zip/.jar/.apk).
+ */
+int ProcessFile(const char* file_name) {
+ if (options_.verbose_) {
+ fprintf(out_file_, "Processing '%s'...\n", file_name);
+ }
+
+ // If the file is not a .dex file, the function tries .zip/.jar/.apk files,
+ // all of which are Zip archives with "classes.dex" inside.
+ const bool verify_checksum = !options_.ignore_bad_checksum_;
+ std::string error_msg;
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ if (!DexFile::Open(file_name, file_name, verify_checksum, &error_msg, &dex_files)) {
+ // Display returned error message to user. Note that this error behavior
+ // differs from the error messages shown by the original Dalvik dexdump.
+ fputs(error_msg.c_str(), stderr);
+ fputc('\n', stderr);
+ return -1;
+ }
+
+ // Success. Either report checksum verification or process
+ // all dex files found in given file.
+ if (options_.checksum_only_) {
+ fprintf(out_file_, "Checksum verified\n");
+ } else {
+ for (size_t i = 0; i < dex_files.size(); i++) {
+ ProcessDexFile(file_name, dex_files[i].get());
+ }
+ }
+ return 0;
+}
+
+} // namespace art
diff --git a/dexlayout/dexlayout.h b/dexlayout/dexlayout.h
new file mode 100644
index 0000000..bae587d
--- /dev/null
+++ b/dexlayout/dexlayout.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Header file of the dexlayout utility.
+ *
+ * This is a tool to read dex files into an internal representation,
+ * reorganize the representation, and emit dex files with a better
+ * file layout.
+ */
+
+#ifndef ART_DEXLAYOUT_DEXLAYOUT_H_
+#define ART_DEXLAYOUT_DEXLAYOUT_H_
+
+#include <stdint.h>
+#include <stdio.h>
+
+namespace art {
+
+/* Supported output formats. */
+enum OutputFormat {
+ kOutputPlain = 0, // default
+ kOutputXml, // XML-style
+};
+
+/* Command-line options. */
+struct Options {
+ bool build_dex_ir_;
+ bool checksum_only_;
+ bool disassemble_;
+ bool exports_only_;
+ bool ignore_bad_checksum_;
+ bool show_annotations_;
+ bool show_cfg_;
+ bool show_file_headers_;
+ bool show_section_headers_;
+ bool verbose_;
+ OutputFormat output_format_;
+ const char* output_file_name_;
+};
+
+/* Prototypes. */
+extern struct Options options_;
+extern FILE* out_file_;
+int ProcessFile(const char* file_name);
+
+} // namespace art
+
+#endif // ART_DEXLAYOUT_DEXLAYOUT_H_
diff --git a/dexlayout/dexlayout_main.cc b/dexlayout/dexlayout_main.cc
new file mode 100644
index 0000000..286a0c6
--- /dev/null
+++ b/dexlayout/dexlayout_main.cc
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Main driver of the dexlayout utility.
+ *
+ * This is a tool to read dex files into an internal representation,
+ * reorganize the representation, and emit dex files with a better
+ * file layout.
+ */
+
+#include "dexlayout.h"
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "mem_map.h"
+#include "runtime.h"
+
+namespace art {
+
+static const char* kProgramName = "dexlayout";
+
+/*
+ * Shows usage.
+ */
+static void Usage(void) {
+ fprintf(stderr, "Copyright (C) 2007 The Android Open Source Project\n\n");
+ fprintf(stderr, "%s: [-a] [-c] [-d] [-e] [-f] [-h] [-i] [-l layout] [-o outfile]"
+ " dexfile...\n\n", kProgramName);
+ fprintf(stderr, " -a : display annotations\n");
+ fprintf(stderr, " -b : build dex_ir\n");
+ fprintf(stderr, " -c : verify checksum and exit\n");
+ fprintf(stderr, " -d : disassemble code sections\n");
+ fprintf(stderr, " -e : display exported items only\n");
+ fprintf(stderr, " -f : display summary information from file header\n");
+ fprintf(stderr, " -g : display CFG for dex\n");
+ fprintf(stderr, " -h : display file header details\n");
+ fprintf(stderr, " -i : ignore checksum failures\n");
+ fprintf(stderr, " -l : output layout, either 'plain' or 'xml'\n");
+ fprintf(stderr, " -o : output file name (defaults to stdout)\n");
+}
+
+/*
+ * Main driver of the dexlayout utility.
+ */
+int DexlayoutDriver(int argc, char** argv) {
+ // Art specific set up.
+ InitLogging(argv);
+ MemMap::Init();
+
+ // Reset options.
+ bool want_usage = false;
+ memset(&options_, 0, sizeof(options_));
+ options_.verbose_ = true;
+
+ // Parse all arguments.
+ while (1) {
+ const int ic = getopt(argc, argv, "abcdefghil:o:");
+ if (ic < 0) {
+ break; // done
+ }
+ switch (ic) {
+ case 'a': // display annotations
+ options_.show_annotations_ = true;
+ break;
+ case 'b': // build dex_ir
+ options_.build_dex_ir_ = true;
+ break;
+ case 'c': // verify the checksum then exit
+ options_.checksum_only_ = true;
+ break;
+ case 'd': // disassemble Dalvik instructions
+ options_.disassemble_ = true;
+ break;
+ case 'e': // exported items only
+ options_.exports_only_ = true;
+ break;
+ case 'f': // display outer file header
+ options_.show_file_headers_ = true;
+ break;
+ case 'g': // display cfg
+ options_.show_cfg_ = true;
+ break;
+ case 'h': // display section headers, i.e. all meta-data
+ options_.show_section_headers_ = true;
+ break;
+ case 'i': // continue even if checksum is bad
+ options_.ignore_bad_checksum_ = true;
+ break;
+ case 'l': // layout
+ if (strcmp(optarg, "plain") == 0) {
+ options_.output_format_ = kOutputPlain;
+ } else if (strcmp(optarg, "xml") == 0) {
+ options_.output_format_ = kOutputXml;
+ options_.verbose_ = false;
+ } else {
+ want_usage = true;
+ }
+ break;
+ case 'o': // output file
+ options_.output_file_name_ = optarg;
+ break;
+ default:
+ want_usage = true;
+ break;
+ } // switch
+ } // while
+
+ // Detect early problems.
+ if (optind == argc) {
+ fprintf(stderr, "%s: no file specified\n", kProgramName);
+ want_usage = true;
+ }
+ if (options_.checksum_only_ && options_.ignore_bad_checksum_) {
+ fprintf(stderr, "Can't specify both -c and -i\n");
+ want_usage = true;
+ }
+ if (want_usage) {
+ Usage();
+ return 2;
+ }
+
+ // Open alternative output file.
+ if (options_.output_file_name_) {
+ out_file_ = fopen(options_.output_file_name_, "w");
+ if (!out_file_) {
+ fprintf(stderr, "Can't open %s\n", options_.output_file_name_);
+ return 1;
+ }
+ }
+
+ // Process all files supplied on command line.
+ int result = 0;
+ while (optind < argc) {
+ result |= ProcessFile(argv[optind++]);
+ } // while
+ return result != 0;
+}
+
+} // namespace art
+
+int main(int argc, char** argv) {
+ return art::DexlayoutDriver(argc, argv);
+}
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
new file mode 100644
index 0000000..42b64c3
--- /dev/null
+++ b/dexlayout/dexlayout_test.cc
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string>
+#include <vector>
+#include <sstream>
+
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "base/stringprintf.h"
+#include "common_runtime_test.h"
+#include "utils.h"
+
+namespace art {
+
+class DexLayoutTest : public CommonRuntimeTest {
+ protected:
+ virtual void SetUp() {
+ CommonRuntimeTest::SetUp();
+ // TODO: Test with other dex files for improved coverage.
+ // Dogfood our own lib core dex file.
+ dex_file_ = GetLibCoreDexFileNames()[0];
+ }
+
+ // Runs test with given arguments.
+ bool Exec(std::string* error_msg) {
+ // TODO: dexdump2 -> dexdump ?
+ ScratchFile dexdump_output;
+ std::string dexdump_filename = dexdump_output.GetFilename();
+ std::string dexdump = GetTestAndroidRoot() + "/bin/dexdump2";
+ EXPECT_TRUE(OS::FileExists(dexdump.c_str())) << dexdump << " should be a valid file path";
+ std::vector<std::string> dexdump_exec_argv =
+ { dexdump, "-d", "-f", "-h", "-l", "plain", "-o", dexdump_filename, dex_file_ };
+
+ ScratchFile dexlayout_output;
+ std::string dexlayout_filename = dexlayout_output.GetFilename();
+ std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
+ EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
+ std::vector<std::string> dexlayout_exec_argv =
+ { dexlayout, "-d", "-f", "-h", "-l", "plain", "-o", dexlayout_filename, dex_file_ };
+
+ if (!::art::Exec(dexdump_exec_argv, error_msg)) {
+ return false;
+ }
+ if (!::art::Exec(dexlayout_exec_argv, error_msg)) {
+ return false;
+ }
+ std::vector<std::string> diff_exec_argv =
+ { "/usr/bin/diff", dexdump_filename, dexlayout_filename };
+ if (!::art::Exec(diff_exec_argv, error_msg)) {
+ return false;
+ }
+ return true;
+ }
+
+ std::string dex_file_;
+};
+
+
+TEST_F(DexLayoutTest, FullPlainOutput) {
+ // Disable test on target.
+ TEST_DISABLED_FOR_TARGET();
+ std::string error_msg;
+ ASSERT_TRUE(Exec(&error_msg)) << error_msg;
+}
+
+} // namespace art
diff --git a/dexlist/Android.bp b/dexlist/Android.bp
new file mode 100644
index 0000000..8e3c91d
--- /dev/null
+++ b/dexlist/Android.bp
@@ -0,0 +1,21 @@
+// Copyright (C) 2015 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+art_cc_binary {
+ name: "dexlist",
+ host_supported: true,
+ srcs: ["dexlist.cc"],
+ cflags: ["-Wall"],
+ shared_libs: ["libart"],
+}
diff --git a/dexlist/Android.mk b/dexlist/Android.mk
deleted file mode 100755
index 6ec6c97..0000000
--- a/dexlist/Android.mk
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# TODO(ajcbik): Art-i-fy this makefile
-
-LOCAL_PATH:= $(call my-dir)
-
-dexlist_src_files := dexlist.cc
-dexlist_c_includes := art/runtime
-dexlist_libraries := libart
-
-##
-## Build the device command line tool dexlist.
-##
-
-ifneq ($(SDK_ONLY),true) # SDK_only doesn't need device version
-include $(CLEAR_VARS)
-LOCAL_CPP_EXTENSION := cc
-LOCAL_SRC_FILES := $(dexlist_src_files)
-LOCAL_C_INCLUDES := $(dexlist_c_includes)
-LOCAL_CFLAGS += -Wall
-LOCAL_SHARED_LIBRARIES += $(dexlist_libraries)
-LOCAL_MODULE := dexlist
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_PATH := $(TARGET_OUT_OPTIONAL_EXECUTABLES)
-include $(BUILD_EXECUTABLE)
-endif # !SDK_ONLY
-
-##
-## Build the host command line tool dexlist.
-##
-
-include $(CLEAR_VARS)
-LOCAL_CPP_EXTENSION := cc
-LOCAL_SRC_FILES := $(dexlist_src_files)
-LOCAL_C_INCLUDES := $(dexlist_c_includes)
-LOCAL_CFLAGS += -Wall
-LOCAL_SHARED_LIBRARIES += $(dexlist_libraries)
-LOCAL_MODULE := dexlist
-LOCAL_MULTILIB := $(ART_MULTILIB_OVERRIDE_host)
-include $(BUILD_HOST_EXECUTABLE)
diff --git a/disassembler/Android.bp b/disassembler/Android.bp
new file mode 100644
index 0000000..d06e4de
--- /dev/null
+++ b/disassembler/Android.bp
@@ -0,0 +1,58 @@
+//
+// Copyright (C) 2012 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+art_cc_defaults {
+ name: "libart-disassembler-defaults",
+ defaults: ["art_defaults"],
+ host_supported: true,
+ clang: true,
+ srcs: [
+ "disassembler.cc",
+ "disassembler_arm.cc",
+ "disassembler_arm64.cc",
+ "disassembler_mips.cc",
+ "disassembler_x86.cc",
+ ],
+
+ shared_libs: [
+ "liblog",
+ "libbase",
+ ],
+ export_include_dirs: ["."],
+}
+
+art_cc_library {
+ name: "libart-disassembler",
+ defaults: ["libart-disassembler-defaults"],
+ shared_libs: [
+ "libart",
+ // For disassembler_arm64.
+ "libvixld-arm64",
+ ],
+}
+
+art_cc_library {
+ name: "libartd-disassembler",
+ defaults: [
+ "libart-disassembler-defaults",
+ "art_debug_defaults",
+ ],
+ shared_libs: [
+ "libartd",
+ // For disassembler_arm64.
+ "libvixld-arm64",
+ ],
+}
diff --git a/disassembler/Android.mk b/disassembler/Android.mk
deleted file mode 100644
index db327fc..0000000
--- a/disassembler/Android.mk
+++ /dev/null
@@ -1,116 +0,0 @@
-#
-# Copyright (C) 2012 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-LOCAL_PATH := $(call my-dir)
-
-include art/build/Android.common_build.mk
-
-LIBART_DISASSEMBLER_SRC_FILES := \
- disassembler.cc \
- disassembler_arm.cc \
- disassembler_arm64.cc \
- disassembler_mips.cc \
- disassembler_x86.cc
-
-# $(1): target or host
-# $(2): ndebug or debug
-define build-libart-disassembler
- ifneq ($(1),target)
- ifneq ($(1),host)
- $$(error expected target or host for argument 1, received $(1))
- endif
- endif
- ifneq ($(2),ndebug)
- ifneq ($(2),debug)
- $$(error expected ndebug or debug for argument 2, received $(2))
- endif
- endif
-
- art_target_or_host := $(1)
- art_ndebug_or_debug := $(2)
-
- include $(CLEAR_VARS)
- ifeq ($$(art_target_or_host),host)
- LOCAL_IS_HOST_MODULE := true
- endif
- LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
- ifeq ($$(art_ndebug_or_debug),ndebug)
- LOCAL_MODULE := libart-disassembler
- else # debug
- LOCAL_MODULE := libartd-disassembler
- endif
-
- LOCAL_MODULE_TAGS := optional
- LOCAL_MODULE_CLASS := SHARED_LIBRARIES
-
- LOCAL_SRC_FILES := $$(LIBART_DISASSEMBLER_SRC_FILES)
-
- ifeq ($$(art_target_or_host),target)
- LOCAL_CLANG := $(ART_TARGET_CLANG)
- $(call set-target-local-cflags-vars,$(2))
- else # host
- LOCAL_CLANG := $(ART_HOST_CLANG)
- LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
- LOCAL_ASFLAGS += $(ART_HOST_ASFLAGS)
- ifeq ($$(art_ndebug_or_debug),debug)
- LOCAL_CFLAGS += $(ART_HOST_DEBUG_CFLAGS)
- LOCAL_ASFLAGS += $(ART_HOST_DEBUG_ASFLAGS)
- else
- LOCAL_CFLAGS += $(ART_HOST_NON_DEBUG_CFLAGS)
- LOCAL_ASFLAGS += $(ART_HOST_NON_DEBUG_ASFLAGS)
- endif
- endif
-
- LOCAL_SHARED_LIBRARIES += liblog
- ifeq ($$(art_ndebug_or_debug),debug)
- LOCAL_SHARED_LIBRARIES += libartd
- else
- LOCAL_SHARED_LIBRARIES += libart
- endif
-
- LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime
- LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)
- LOCAL_MULTILIB := both
-
- LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
- LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
- LOCAL_NATIVE_COVERAGE := $(ART_COVERAGE)
- # For disassembler_arm64.
- ifeq ($$(art_ndebug_or_debug),debug)
- LOCAL_SHARED_LIBRARIES += libvixld-arm64
- else
- LOCAL_SHARED_LIBRARIES += libvixl-arm64
- endif
- ifeq ($$(art_target_or_host),target)
- include $(BUILD_SHARED_LIBRARY)
- else # host
- include $(BUILD_HOST_SHARED_LIBRARY)
- endif
-endef
-
-ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
- $(eval $(call build-libart-disassembler,target,ndebug))
-endif
-ifeq ($(ART_BUILD_TARGET_DEBUG),true)
- $(eval $(call build-libart-disassembler,target,debug))
-endif
-# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
-ifeq ($(ART_BUILD_HOST_NDEBUG),true)
- $(eval $(call build-libart-disassembler,host,ndebug))
-endif
-ifeq ($(ART_BUILD_HOST_DEBUG),true)
- $(eval $(call build-libart-disassembler,host,debug))
-endif
diff --git a/disassembler/disassembler.cc b/disassembler/disassembler.cc
index bcd0d16..8eecc62 100644
--- a/disassembler/disassembler.cc
+++ b/disassembler/disassembler.cc
@@ -18,15 +18,23 @@
#include <ostream>
-#include "base/logging.h"
-#include "base/stringprintf.h"
+#include "android-base/logging.h"
+#include "android-base/stringprintf.h"
+
#include "disassembler_arm.h"
#include "disassembler_arm64.h"
#include "disassembler_mips.h"
#include "disassembler_x86.h"
+using android::base::StringPrintf;
+
namespace art {
+Disassembler::Disassembler(DisassemblerOptions* disassembler_options)
+ : disassembler_options_(disassembler_options) {
+ CHECK(disassembler_options_ != nullptr);
+}
+
Disassembler* Disassembler::Create(InstructionSet instruction_set, DisassemblerOptions* options) {
if (instruction_set == kArm || instruction_set == kThumb2) {
return new arm::DisassemblerArm(options);
@@ -39,7 +47,7 @@
} else if (instruction_set == kX86_64) {
return new x86::DisassemblerX86(options, true);
} else {
- UNIMPLEMENTED(FATAL) << "no disassembler for " << instruction_set;
+ UNIMPLEMENTED(FATAL) << static_cast<uint32_t>(instruction_set);
return nullptr;
}
}
diff --git a/disassembler/disassembler.h b/disassembler/disassembler.h
index 86793cc..1ef456c 100644
--- a/disassembler/disassembler.h
+++ b/disassembler/disassembler.h
@@ -21,8 +21,9 @@
#include <iosfwd>
+#include "android-base/macros.h"
+
#include "arch/instruction_set.h"
-#include "base/macros.h"
namespace art {
@@ -81,10 +82,7 @@
}
protected:
- explicit Disassembler(DisassemblerOptions* disassembler_options)
- : disassembler_options_(disassembler_options) {
- CHECK(disassembler_options_ != nullptr);
- }
+ explicit Disassembler(DisassemblerOptions* disassembler_options);
std::string FormatInstructionPointer(const uint8_t* begin);
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index a47b6ad..c3e288d 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -21,10 +21,13 @@
#include <ostream>
#include <sstream>
+#include "android-base/logging.h"
+#include "android-base/stringprintf.h"
+
#include "arch/arm/registers_arm.h"
#include "base/bit_utils.h"
-#include "base/logging.h"
-#include "base/stringprintf.h"
+
+using android::base::StringPrintf;
namespace art {
namespace arm {
diff --git a/disassembler/disassembler_arm64.cc b/disassembler/disassembler_arm64.cc
index 80bacb2..49b9623 100644
--- a/disassembler/disassembler_arm64.cc
+++ b/disassembler/disassembler_arm64.cc
@@ -20,8 +20,10 @@
#include <sstream>
-#include "base/logging.h"
-#include "base/stringprintf.h"
+#include "android-base/logging.h"
+#include "android-base/stringprintf.h"
+
+using android::base::StringPrintf;
using namespace vixl::aarch64; // NOLINT(build/namespaces)
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index 02c6d71..9a73f29 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -19,8 +19,10 @@
#include <ostream>
#include <sstream>
-#include "base/logging.h"
-#include "base/stringprintf.h"
+#include "android-base/logging.h"
+#include "android-base/stringprintf.h"
+
+using android::base::StringPrintf;
namespace art {
namespace mips {
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index 2ca84e5..9f49ec6 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -21,8 +21,10 @@
#include <ostream>
#include <sstream>
-#include "base/logging.h"
-#include "base/stringprintf.h"
+#include "android-base/logging.h"
+#include "android-base/stringprintf.h"
+
+using android::base::StringPrintf;
namespace art {
namespace x86 {
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index 21a0ca0..106cf2f 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -59,7 +59,7 @@
image_diff_pid_(image_diff_pid),
zygote_diff_pid_(zygote_diff_pid) {}
- bool Dump() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool Dump() REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostream& os = *os_;
os << "IMAGE LOCATION: " << image_location_ << "\n\n";
@@ -98,7 +98,7 @@
}
bool DumpImageDiff(pid_t image_diff_pid, pid_t zygote_diff_pid)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostream& os = *os_;
{
@@ -145,7 +145,7 @@
}
static std::string PrettyFieldValue(ArtField* field, mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostringstream oss;
switch (field->GetTypeAsPrimitiveType()) {
case Primitive::kPrimNot: {
@@ -217,7 +217,7 @@
void DiffObjectContents(mirror::Object* obj,
uint8_t* remote_bytes,
- std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_) {
+ std::ostream& os) REQUIRES_SHARED(Locks::mutator_lock_) {
const char* tabs = " ";
// Attempt to find fields for all dirty bytes.
mirror::Class* klass = obj->GetClass();
@@ -283,7 +283,7 @@
bool DumpImageDiffMap(pid_t image_diff_pid,
pid_t zygote_diff_pid,
const backtrace_map_t& boot_map)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostream& os = *os_;
const PointerSize pointer_size = InstructionSetPointerSize(
Runtime::Current()->GetInstructionSet());
@@ -867,7 +867,7 @@
}
static std::string GetClassDescriptor(mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(klass != nullptr);
std::string descriptor;
diff --git a/oatdump/Android.bp b/oatdump/Android.bp
new file mode 100644
index 0000000..b01bf51
--- /dev/null
+++ b/oatdump/Android.bp
@@ -0,0 +1,89 @@
+//
+// Copyright (C) 2011 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_defaults {
+ name: "oatdump-defaults",
+ defaults: ["art_defaults"],
+ host_supported: true,
+ srcs: ["oatdump.cc"],
+ target: {
+ android: {
+ shared_libs: ["libcutils"],
+ },
+ },
+ include_dirs: ["art/cmdline"],
+}
+
+art_cc_binary {
+ name: "oatdump",
+ defaults: ["oatdump-defaults"],
+ shared_libs: [
+ "libart",
+ "libart-compiler",
+ "libart-disassembler",
+ ],
+}
+
+art_cc_binary {
+ name: "oatdumpd",
+ defaults: [
+ "art_debug_defaults",
+ "oatdump-defaults",
+ ],
+ shared_libs: [
+ "libartd",
+ "libartd-compiler",
+ "libartd-disassembler",
+ ],
+}
+
+art_cc_binary {
+ name: "oatdumps",
+ defaults: ["oatdump-defaults"],
+ target: {
+ darwin: {
+ enabled: false,
+ },
+ },
+ static_libs: [
+ "libart",
+ "libart-compiler",
+ "libart-disassembler",
+ "libvixl-arm",
+ "libvixl-arm64",
+ ] + art_static_dependencies,
+}
+
+art_cc_binary {
+ name: "oatdumpds",
+ defaults: [
+ "art_debug_defaults",
+ "oatdump-defaults",
+ ],
+ target: {
+ darwin: {
+ enabled: false,
+ },
+ },
+ static_libs: [
+ "libartd",
+ "libartd-compiler",
+ "libartd-disassembler",
+ "libvixld-arm",
+ "libvixld-arm64",
+ ] + art_static_dependencies,
+}
+
diff --git a/oatdump/Android.mk b/oatdump/Android.mk
index 5c75f20..7be8a8d 100644
--- a/oatdump/Android.mk
+++ b/oatdump/Android.mk
@@ -16,19 +16,6 @@
LOCAL_PATH := $(call my-dir)
-include art/build/Android.executable.mk
-
-OATDUMP_SRC_FILES := \
- oatdump.cc
-
-# Build variants {target,host} x {debug,ndebug}
-$(eval $(call build-art-multi-executable,oatdump,$(OATDUMP_SRC_FILES),libart-compiler libart-disassembler,libcutils,,art/compiler art/disassembler))
-
-########################################################################
-# oatdump targets
-
-ART_DUMP_OAT_PATH ?= $(OUT_DIR)
-
OATDUMP := $(HOST_OUT_EXECUTABLES)/oatdump$(HOST_EXECUTABLE_SUFFIX)
OATDUMPD := $(HOST_OUT_EXECUTABLES)/oatdumpd$(HOST_EXECUTABLE_SUFFIX)
# TODO: for now, override with debug version for better error reporting
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 96c8e94..c87a18b 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -514,7 +514,7 @@
return oat_file_.GetOatHeader().GetInstructionSet();
}
- const void* GetQuickOatCode(ArtMethod* m) SHARED_REQUIRES(Locks::mutator_lock_) {
+ const void* GetQuickOatCode(ArtMethod* m) REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < oat_dex_files_.size(); i++) {
const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
CHECK(oat_dex_file != nullptr);
@@ -1268,7 +1268,7 @@
DCHECK(options_.class_loader_ != nullptr);
return verifier::MethodVerifier::VerifyMethodAndDump(
soa.Self(), vios, dex_method_idx, dex_file, dex_cache, *options_.class_loader_,
- &class_def, code_item, nullptr, method_access_flags);
+ class_def, code_item, nullptr, method_access_flags);
}
return nullptr;
@@ -1424,7 +1424,7 @@
image_header_(image_header),
oat_dumper_options_(oat_dumper_options) {}
- bool Dump() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool Dump() REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostream& os = *os_;
std::ostream& indent_os = vios_.Stream();
@@ -1671,7 +1671,7 @@
public:
explicit DumpArtMethodVisitor(ImageDumper* image_dumper) : image_dumper_(image_dumper) {}
- virtual void Visit(ArtMethod* method) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ virtual void Visit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostream& indent_os = image_dumper_->vios_.Stream();
indent_os << method << " " << " ArtMethod: " << PrettyMethod(method) << "\n";
image_dumper_->DumpMethod(method, indent_os);
@@ -1683,7 +1683,7 @@
};
static void PrettyObjectValue(std::ostream& os, mirror::Class* type, mirror::Object* value)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(type != nullptr);
if (value == nullptr) {
os << StringPrintf("null %s\n", PrettyDescriptor(type).c_str());
@@ -1700,7 +1700,7 @@
}
static void PrintField(std::ostream& os, ArtField* field, mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
os << StringPrintf("%s: ", field->GetName());
switch (field->GetTypeAsPrimitiveType()) {
case Primitive::kPrimLong:
@@ -1753,7 +1753,7 @@
}
static void DumpFields(std::ostream& os, mirror::Object* obj, mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Class* super = klass->GetSuperClass();
if (super != nullptr) {
DumpFields(os, obj, super);
@@ -1767,7 +1767,7 @@
return image_space_.Contains(object);
}
- const void* GetQuickOatCodeBegin(ArtMethod* m) SHARED_REQUIRES(Locks::mutator_lock_) {
+ const void* GetQuickOatCodeBegin(ArtMethod* m) REQUIRES_SHARED(Locks::mutator_lock_) {
const void* quick_code = m->GetEntryPointFromQuickCompiledCodePtrSize(
image_header_.GetPointerSize());
if (Runtime::Current()->GetClassLinker()->IsQuickResolutionStub(quick_code)) {
@@ -1780,7 +1780,7 @@
}
uint32_t GetQuickOatCodeSize(ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const uint32_t* oat_code_begin = reinterpret_cast<const uint32_t*>(GetQuickOatCodeBegin(m));
if (oat_code_begin == nullptr) {
return 0;
@@ -1789,7 +1789,7 @@
}
const void* GetQuickOatCodeEnd(ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const uint8_t* oat_code_begin = reinterpret_cast<const uint8_t*>(GetQuickOatCodeBegin(m));
if (oat_code_begin == nullptr) {
return nullptr;
@@ -1797,7 +1797,7 @@
return oat_code_begin + GetQuickOatCodeSize(m);
}
- static void Callback(mirror::Object* obj, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
DCHECK(arg != nullptr);
ImageDumper* state = reinterpret_cast<ImageDumper*>(arg);
@@ -1969,7 +1969,7 @@
}
void DumpMethod(ArtMethod* method, std::ostream& indent_os)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(method != nullptr);
const void* quick_oat_code_begin = GetQuickOatCodeBegin(method);
const void* quick_oat_code_end = GetQuickOatCodeEnd(method);
@@ -2149,7 +2149,7 @@
}
void DumpOutliers(std::ostream& os)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
size_t sum_of_sizes = 0;
size_t sum_of_sizes_squared = 0;
size_t sum_of_expansion = 0;
@@ -2253,7 +2253,7 @@
}
void Dump(std::ostream& os, std::ostream& indent_os)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
{
os << "art_file_bytes = " << PrettySize(file_bytes) << "\n\n"
<< "art_file_bytes = header_bytes + object_bytes + alignment_bytes\n";
@@ -2370,7 +2370,7 @@
static int DumpImage(gc::space::ImageSpace* image_space,
OatDumperOptions* options,
- std::ostream* os) SHARED_REQUIRES(Locks::mutator_lock_) {
+ std::ostream* os) REQUIRES_SHARED(Locks::mutator_lock_) {
const ImageHeader& image_header = image_space->GetImageHeader();
if (!image_header.IsValid()) {
fprintf(stderr, "Invalid image header %s\n", image_space->GetImageLocation().c_str());
diff --git a/oatdump/oatdump_test.cc b/oatdump/oatdump_test.cc
index db97055..22db818 100644
--- a/oatdump/oatdump_test.cc
+++ b/oatdump/oatdump_test.cc
@@ -42,13 +42,22 @@
core_oat_location_ = GetSystemImageFilename(GetCoreOatLocation().c_str(), kRuntimeISA);
}
+ // Linking flavor.
+ enum Flavor {
+ kDynamic, // oatdump(d)
+ kStatic, // oatdump(d)s
+ };
+
// Returns path to the oatdump binary.
- std::string GetOatDumpFilePath() {
+ std::string GetOatDumpFilePath(Flavor flavor) {
std::string root = GetTestAndroidRoot();
root += "/bin/oatdump";
if (kIsDebugBuild) {
root += "d";
}
+ if (flavor == kStatic) {
+ root += "s";
+ }
return root;
}
@@ -58,12 +67,19 @@
kModeSymbolize,
};
+ // Display style.
+ enum Display {
+ kListOnly,
+ kListAndCode
+ };
+
// Run the test with custom arguments.
- bool Exec(Mode mode,
+ bool Exec(Flavor flavor,
+ Mode mode,
const std::vector<std::string>& args,
- bool list_only,
+ Display display,
std::string* error_msg) {
- std::string file_path = GetOatDumpFilePath();
+ std::string file_path = GetOatDumpFilePath(flavor);
EXPECT_TRUE(OS::FileExists(file_path.c_str())) << file_path << " should be a valid file path";
@@ -81,7 +97,7 @@
expected_prefixes.push_back("LOCATION:");
expected_prefixes.push_back("MAGIC:");
expected_prefixes.push_back("DEX FILE COUNT:");
- if (!list_only) {
+ if (display == kListAndCode) {
// Code and dex code do not show up if list only.
expected_prefixes.push_back("DEX CODE:");
expected_prefixes.push_back("CODE:");
@@ -205,37 +221,73 @@
#if !defined(__arm__) && !defined(__mips__)
TEST_F(OatDumpTest, TestImage) {
std::string error_msg;
- ASSERT_TRUE(Exec(kModeArt, {}, /*list_only*/ false, &error_msg)) << error_msg;
+ ASSERT_TRUE(Exec(kDynamic, kModeArt, {}, kListAndCode, &error_msg)) << error_msg;
+}
+TEST_F(OatDumpTest, TestImageStatic) {
+ TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
+ std::string error_msg;
+ ASSERT_TRUE(Exec(kStatic, kModeArt, {}, kListAndCode, &error_msg)) << error_msg;
}
TEST_F(OatDumpTest, TestOatImage) {
std::string error_msg;
- ASSERT_TRUE(Exec(kModeOat, {}, /*list_only*/ false, &error_msg)) << error_msg;
+ ASSERT_TRUE(Exec(kDynamic, kModeOat, {}, kListAndCode, &error_msg)) << error_msg;
+}
+TEST_F(OatDumpTest, TestOatImageStatic) {
+ TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
+ std::string error_msg;
+ ASSERT_TRUE(Exec(kStatic, kModeOat, {}, kListAndCode, &error_msg)) << error_msg;
}
TEST_F(OatDumpTest, TestNoDumpVmap) {
std::string error_msg;
- ASSERT_TRUE(Exec(kModeArt, {"--no-dump:vmap"}, /*list_only*/ false, &error_msg)) << error_msg;
+ ASSERT_TRUE(Exec(kDynamic, kModeArt, {"--no-dump:vmap"}, kListAndCode, &error_msg)) << error_msg;
+}
+TEST_F(OatDumpTest, TestNoDumpVmapStatic) {
+ TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
+ std::string error_msg;
+ ASSERT_TRUE(Exec(kStatic, kModeArt, {"--no-dump:vmap"}, kListAndCode, &error_msg)) << error_msg;
}
TEST_F(OatDumpTest, TestNoDisassemble) {
std::string error_msg;
- ASSERT_TRUE(Exec(kModeArt, {"--no-disassemble"}, /*list_only*/ false, &error_msg)) << error_msg;
+ ASSERT_TRUE(Exec(kDynamic, kModeArt, {"--no-disassemble"}, kListAndCode, &error_msg))
+ << error_msg;
+}
+TEST_F(OatDumpTest, TestNoDisassembleStatic) {
+ TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
+ std::string error_msg;
+ ASSERT_TRUE(Exec(kStatic, kModeArt, {"--no-disassemble"}, kListAndCode, &error_msg)) << error_msg;
}
TEST_F(OatDumpTest, TestListClasses) {
std::string error_msg;
- ASSERT_TRUE(Exec(kModeArt, {"--list-classes"}, /*list_only*/ true, &error_msg)) << error_msg;
+ ASSERT_TRUE(Exec(kDynamic, kModeArt, {"--list-classes"}, kListOnly, &error_msg)) << error_msg;
+}
+TEST_F(OatDumpTest, TestListClassesStatic) {
+ TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
+ std::string error_msg;
+ ASSERT_TRUE(Exec(kStatic, kModeArt, {"--list-classes"}, kListOnly, &error_msg)) << error_msg;
}
TEST_F(OatDumpTest, TestListMethods) {
std::string error_msg;
- ASSERT_TRUE(Exec(kModeArt, {"--list-methods"}, /*list_only*/ true, &error_msg)) << error_msg;
+ ASSERT_TRUE(Exec(kDynamic, kModeArt, {"--list-methods"}, kListOnly, &error_msg)) << error_msg;
+}
+TEST_F(OatDumpTest, TestListMethodsStatic) {
+ TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
+ std::string error_msg;
+ ASSERT_TRUE(Exec(kStatic, kModeArt, {"--list-methods"}, kListOnly, &error_msg)) << error_msg;
}
TEST_F(OatDumpTest, TestSymbolize) {
std::string error_msg;
- ASSERT_TRUE(Exec(kModeSymbolize, {}, /*list_only*/ true, &error_msg)) << error_msg;
+ ASSERT_TRUE(Exec(kDynamic, kModeSymbolize, {}, kListOnly, &error_msg)) << error_msg;
+}
+TEST_F(OatDumpTest, TestSymbolizeStatic) {
+ TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
+ std::string error_msg;
+ ASSERT_TRUE(Exec(kStatic, kModeSymbolize, {}, kListOnly, &error_msg)) << error_msg;
}
#endif
} // namespace art
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 3f6531b..b7ce02c 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -462,7 +462,7 @@
public:
explicit PatchOatArtFieldVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
- void Visit(ArtField* field) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Visit(ArtField* field) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* const dest = patch_oat_->RelocatedCopyOf(field);
dest->SetDeclaringClass(patch_oat_->RelocatedAddressOfPointer(field->GetDeclaringClass()));
}
@@ -480,7 +480,7 @@
public:
explicit PatchOatArtMethodVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
- void Visit(ArtMethod* method) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Visit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* const dest = patch_oat_->RelocatedCopyOf(method);
patch_oat_->FixupMethod(method, dest);
}
@@ -523,7 +523,7 @@
}
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
*roots[i] = patch_oat_->RelocatedAddressOfPointer(*roots[i]);
}
@@ -531,7 +531,7 @@
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
roots[i]->Assign(patch_oat_->RelocatedAddressOfPointer(roots[i]->AsMirrorPtr()));
}
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 64efea9d..a97b051 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -99,14 +99,14 @@
bool new_oat_out); // Output oat was newly created?
static void BitmapCallback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
reinterpret_cast<PatchOat*>(arg)->VisitObject(obj);
}
void VisitObject(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void FixupMethod(ArtMethod* object, ArtMethod* copy)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Patches oat in place, modifying the oat_file given to the constructor.
bool PatchElf();
@@ -115,18 +115,18 @@
template <typename ElfFileImpl>
bool PatchOatHeader(ElfFileImpl* oat_file);
- bool PatchImage(bool primary_image) SHARED_REQUIRES(Locks::mutator_lock_);
- void PatchArtFields(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
- void PatchArtMethods(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
- void PatchImTables(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool PatchImage(bool primary_image) REQUIRES_SHARED(Locks::mutator_lock_);
+ void PatchArtFields(const ImageHeader* image_header) REQUIRES_SHARED(Locks::mutator_lock_);
+ void PatchArtMethods(const ImageHeader* image_header) REQUIRES_SHARED(Locks::mutator_lock_);
+ void PatchImTables(const ImageHeader* image_header) REQUIRES_SHARED(Locks::mutator_lock_);
void PatchImtConflictTables(const ImageHeader* image_header)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void PatchInternedStrings(const ImageHeader* image_header)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void PatchClassTable(const ImageHeader* image_header)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool WriteElf(File* out);
bool WriteImage(File* out);
diff --git a/runtime/Android.bp b/runtime/Android.bp
index c92df4e..59e4a15 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -97,7 +97,6 @@
"intern_table.cc",
"interpreter/interpreter.cc",
"interpreter/interpreter_common.cc",
- "interpreter/interpreter_goto_table_impl.cc",
"interpreter/interpreter_switch_impl.cc",
"interpreter/unstarted_runtime.cc",
"java_vm_ext.cc",
@@ -135,6 +134,7 @@
"native_bridge_art_interface.cc",
"native_stack_dump.cc",
"native/dalvik_system_DexFile.cc",
+ "native/dalvik_system_InMemoryDexClassLoader_DexData.cc",
"native/dalvik_system_VMDebug.cc",
"native/dalvik_system_VMRuntime.cc",
"native/dalvik_system_VMStack.cc",
@@ -220,6 +220,7 @@
"entrypoints/quick/quick_cast_entrypoints.cc",
"entrypoints/quick/quick_deoptimization_entrypoints.cc",
"entrypoints/quick/quick_dexcache_entrypoints.cc",
+ "entrypoints/quick/quick_entrypoints_enum.cc",
"entrypoints/quick/quick_field_entrypoints.cc",
"entrypoints/quick/quick_fillarray_entrypoints.cc",
"entrypoints/quick/quick_instrumentation_entrypoints.cc",
@@ -379,6 +380,8 @@
"liblz4",
// For liblog, atrace, properties, ashmem, set_sched_policy and socket_peer_is_trusted.
"libcutils",
+ // For common macros.
+ "libbase",
],
static: {
static_libs: ["libsigchain_dummy"],
@@ -387,6 +390,8 @@
shared_libs: ["libsigchain"],
},
export_include_dirs: ["."],
+ // ART's macros.h depends on libbase's macros.h.
+ export_shared_lib_headers: ["libbase"],
}
gensrcs {
@@ -449,8 +454,8 @@
art_cc_library {
name: "libartd",
defaults: [
- "libart_defaults",
- "art_debug_defaults",
+ "art_debug_defaults",
+ "libart_defaults",
],
}
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 492a12d..cb8edff 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -14,6 +14,9 @@
* limitations under the License.
*/
+#include <math.h>
+#include <string.h>
+
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index e25e93f..a3f053b 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -1086,25 +1086,6 @@
// Load the class (r2)
ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
cbz r2, .Lart_quick_alloc_object_rosalloc_slow_path // Check null class
- // Check class status.
- ldr r3, [r2, #MIRROR_CLASS_STATUS_OFFSET]
- cmp r3, #MIRROR_CLASS_STATUS_INITIALIZED
- bne .Lart_quick_alloc_object_rosalloc_slow_path
- // Add a fake dependence from the
- // following access flag and size
- // loads to the status load.
- // This is to prevent those loads
- // from being reordered above the
- // status load and reading wrong
- // values (an alternative is to use
- // a load-acquire for the status).
- eor r3, r3, r3
- add r2, r2, r3
- // Check access flags has
- // kAccClassIsFinalizable
- ldr r3, [r2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
- tst r3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE
- bne .Lart_quick_alloc_object_rosalloc_slow_path
ldr r3, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] // Check if the thread local
// allocation stack has room.
@@ -1113,22 +1094,21 @@
cmp r3, r12
bhs .Lart_quick_alloc_object_rosalloc_slow_path
- ldr r3, [r2, #MIRROR_CLASS_OBJECT_SIZE_OFFSET] // Load the object size (r3)
+ ldr r3, [r2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (r3)
cmp r3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread
- // local allocation
+ // local allocation. Also does the
+ // initialized and finalizable checks.
bhs .Lart_quick_alloc_object_rosalloc_slow_path
// Compute the rosalloc bracket index
- // from the size.
- // Align up the size by the rosalloc
- // bracket quantum size and divide
- // by the quantum size and subtract
- // by 1. This code is a shorter but
- // equivalent version.
- sub r3, r3, #1
- lsr r3, r3, #ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT
+ // from the size. Since the size is
+ // already aligned we can combine the
+ // two shifts together.
+ add r12, r9, r3, lsr #(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT)
+ // Subtract pointer size since there
+ // are no runs for 0 byte allocations
+ // and the size is already aligned.
// Load the rosalloc run (r12)
- add r12, r9, r3, lsl #POINTER_SIZE_SHIFT
- ldr r12, [r12, #THREAD_ROSALLOC_RUNS_OFFSET]
+ ldr r12, [r12, #(THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)]
// Load the free list head (r3). This
// will be the return val.
ldr r3, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
@@ -1153,7 +1133,7 @@
// to later accesses to the class
// object. Alternatively we could use
// "ishst" if we use load-acquire for
- // the class status load.)
+ // the object size load.)
// Needs to be done before pushing on
// allocation since Heap::VisitObjects
// relies on seeing the class pointer.
@@ -1191,25 +1171,6 @@
// Need to preserve r0 and r1 to the slow path.
.macro ALLOC_OBJECT_TLAB_FAST_PATH slowPathLabel
cbz r2, \slowPathLabel // Check null class
- // Check class status.
- ldr r3, [r2, #MIRROR_CLASS_STATUS_OFFSET]
- cmp r3, #MIRROR_CLASS_STATUS_INITIALIZED
- bne \slowPathLabel
- // Add a fake dependence from the
- // following access flag and size
- // loads to the status load.
- // This is to prevent those loads
- // from being reordered above the
- // status load and reading wrong
- // values (an alternative is to use
- // a load-acquire for the status).
- eor r3, r3, r3
- add r2, r2, r3
- // Check access flags has
- // kAccClassIsFinalizable.
- ldr r3, [r2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
- tst r3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE
- bne \slowPathLabel
// Load thread_local_pos (r12) and
// thread_local_end (r3) with ldrd.
// Check constraints for ldrd.
@@ -1218,16 +1179,10 @@
#endif
ldrd r12, r3, [r9, #THREAD_LOCAL_POS_OFFSET]
sub r12, r3, r12 // Compute the remaining buf size.
- ldr r3, [r2, #MIRROR_CLASS_OBJECT_SIZE_OFFSET] // Load the object size (r3).
- cmp r3, r12 // Check if it fits. OK to do this
- // before rounding up the object size
- // assuming the buf size alignment.
+ ldr r3, [r2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (r3).
+ cmp r3, r12 // Check if it fits.
bhi \slowPathLabel
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
- // Round up the object size by the
- // object alignment. (addr + 7) & ~7.
- add r3, r3, #OBJECT_ALIGNMENT_MASK
- and r3, r3, #OBJECT_ALIGNMENT_MASK_TOGGLED
// Reload old thread_local_pos (r0)
// for the return value.
ldr r0, [r9, #THREAD_LOCAL_POS_OFFSET]
@@ -1244,7 +1199,7 @@
// the fields of the class.
// Alternatively we could use "ishst"
// if we use load-acquire for the
- // class status load.)
+ // object size load.)
dmb ish
bx lr
.endm
@@ -1589,7 +1544,7 @@
*/
.extern artDeoptimizeFromCompiledCode
ENTRY art_quick_deoptimize_from_compiled_code
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r0
+ SETUP_SAVE_EVERYTHING_FRAME r0
mov r0, r9 @ Set up args.
blx artDeoptimizeFromCompiledCode @ artDeoptimizeFromCompiledCode(Thread*)
END art_quick_deoptimize_from_compiled_code
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 55b09c3..c2078f0 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -14,6 +14,9 @@
* limitations under the License.
*/
+#include <math.h>
+#include <string.h>
+
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 202846a..e0e1e81 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -19,6 +19,42 @@
#include "arch/quick_alloc_entrypoints.S"
+.macro SAVE_REG reg, offset
+ str \reg, [sp, #(\offset)]
+ .cfi_rel_offset \reg, (\offset)
+.endm
+
+.macro RESTORE_REG reg, offset
+ ldr \reg, [sp, #(\offset)]
+ .cfi_restore \reg
+.endm
+
+.macro SAVE_TWO_REGS reg1, reg2, offset
+ stp \reg1, \reg2, [sp, #(\offset)]
+ .cfi_rel_offset \reg1, (\offset)
+ .cfi_rel_offset \reg2, (\offset) + 8
+.endm
+
+.macro RESTORE_TWO_REGS reg1, reg2, offset
+ ldp \reg1, \reg2, [sp, #(\offset)]
+ .cfi_restore \reg1
+ .cfi_restore \reg2
+.endm
+
+.macro SAVE_TWO_REGS_INCREASE_FRAME reg1, reg2, frame_adjustment
+ stp \reg1, \reg2, [sp, #-(\frame_adjustment)]!
+ .cfi_adjust_cfa_offset (\frame_adjustment)
+ .cfi_rel_offset \reg1, 0
+ .cfi_rel_offset \reg2, 8
+.endm
+
+.macro RESTORE_TWO_REGS_DECREASE_FRAME reg1, reg2, frame_adjustment
+ ldp \reg1, \reg2, [sp], #(\frame_adjustment)
+ .cfi_restore \reg1
+ .cfi_restore \reg2
+ .cfi_adjust_cfa_offset -(\frame_adjustment)
+.endm
+
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
@@ -50,29 +86,12 @@
stp d14, d15, [sp, #64]
// GP callee-saves
- stp x19, x20, [sp, #80]
- .cfi_rel_offset x19, 80
- .cfi_rel_offset x20, 88
-
- stp x21, x22, [sp, #96]
- .cfi_rel_offset x21, 96
- .cfi_rel_offset x22, 104
-
- stp x23, x24, [sp, #112]
- .cfi_rel_offset x23, 112
- .cfi_rel_offset x24, 120
-
- stp x25, x26, [sp, #128]
- .cfi_rel_offset x25, 128
- .cfi_rel_offset x26, 136
-
- stp x27, x28, [sp, #144]
- .cfi_rel_offset x27, 144
- .cfi_rel_offset x28, 152
-
- stp x29, xLR, [sp, #160]
- .cfi_rel_offset x29, 160
- .cfi_rel_offset x30, 168
+ SAVE_TWO_REGS x19, x20, 80
+ SAVE_TWO_REGS x21, x22, 96
+ SAVE_TWO_REGS x23, x24, 112
+ SAVE_TWO_REGS x25, x26, 128
+ SAVE_TWO_REGS x27, x28, 144
+ SAVE_TWO_REGS x29, xLR, 160
// Store ArtMethod* Runtime::callee_save_methods_[kSaveAllCalleeSaves].
str xIP0, [sp]
@@ -106,25 +125,11 @@
// GP callee-saves.
// x20 paired with ArtMethod* - see below.
- stp x21, x22, [sp, #16]
- .cfi_rel_offset x21, 16
- .cfi_rel_offset x22, 24
-
- stp x23, x24, [sp, #32]
- .cfi_rel_offset x23, 32
- .cfi_rel_offset x24, 40
-
- stp x25, x26, [sp, #48]
- .cfi_rel_offset x25, 48
- .cfi_rel_offset x26, 56
-
- stp x27, x28, [sp, #64]
- .cfi_rel_offset x27, 64
- .cfi_rel_offset x28, 72
-
- stp x29, xLR, [sp, #80]
- .cfi_rel_offset x29, 80
- .cfi_rel_offset x30, 88
+ SAVE_TWO_REGS x21, x22, 16
+ SAVE_TWO_REGS x23, x24, 32
+ SAVE_TWO_REGS x25, x26, 48
+ SAVE_TWO_REGS x27, x28, 64
+ SAVE_TWO_REGS x29, xLR, 80
// Store ArtMethod* Runtime::callee_save_methods_[kSaveRefsOnly].
stp xIP0, x20, [sp]
@@ -138,28 +143,12 @@
// TODO: Probably no need to restore registers preserved by aapcs64.
.macro RESTORE_SAVE_REFS_ONLY_FRAME
// Callee-saves.
- ldr x20, [sp, #8]
- .cfi_restore x20
-
- ldp x21, x22, [sp, #16]
- .cfi_restore x21
- .cfi_restore x22
-
- ldp x23, x24, [sp, #32]
- .cfi_restore x23
- .cfi_restore x24
-
- ldp x25, x26, [sp, #48]
- .cfi_restore x25
- .cfi_restore x26
-
- ldp x27, x28, [sp, #64]
- .cfi_restore x27
- .cfi_restore x28
-
- ldp x29, xLR, [sp, #80]
- .cfi_restore x29
- .cfi_restore x30
+ RESTORE_REG x20, 8
+ RESTORE_TWO_REGS x21, x22, 16
+ RESTORE_TWO_REGS x23, x24, 32
+ RESTORE_TWO_REGS x25, x26, 48
+ RESTORE_TWO_REGS x27, x28, 64
+ RESTORE_TWO_REGS x29, xLR, 80
add sp, sp, #96
.cfi_adjust_cfa_offset -96
@@ -193,43 +182,19 @@
stp d6, d7, [sp, #64]
// Core args.
- stp x1, x2, [sp, #80]
- .cfi_rel_offset x1, 80
- .cfi_rel_offset x2, 88
-
- stp x3, x4, [sp, #96]
- .cfi_rel_offset x3, 96
- .cfi_rel_offset x4, 104
-
- stp x5, x6, [sp, #112]
- .cfi_rel_offset x5, 112
- .cfi_rel_offset x6, 120
+ SAVE_TWO_REGS x1, x2, 80
+ SAVE_TWO_REGS x3, x4, 96
+ SAVE_TWO_REGS x5, x6, 112
// x7, Callee-saves.
- stp x7, x20, [sp, #128]
- .cfi_rel_offset x7, 128
- .cfi_rel_offset x20, 136
-
- stp x21, x22, [sp, #144]
- .cfi_rel_offset x21, 144
- .cfi_rel_offset x22, 152
-
- stp x23, x24, [sp, #160]
- .cfi_rel_offset x23, 160
- .cfi_rel_offset x24, 168
-
- stp x25, x26, [sp, #176]
- .cfi_rel_offset x25, 176
- .cfi_rel_offset x26, 184
-
- stp x27, x28, [sp, #192]
- .cfi_rel_offset x27, 192
- .cfi_rel_offset x28, 200
+ SAVE_TWO_REGS x7, x20, 128
+ SAVE_TWO_REGS x21, x22, 144
+ SAVE_TWO_REGS x23, x24, 160
+ SAVE_TWO_REGS x25, x26, 176
+ SAVE_TWO_REGS x27, x28, 192
// x29(callee-save) and LR.
- stp x29, xLR, [sp, #208]
- .cfi_rel_offset x29, 208
- .cfi_rel_offset x30, 216
+ SAVE_TWO_REGS x29, xLR, 208
.endm
@@ -275,43 +240,19 @@
ldp d6, d7, [sp, #64]
// Core args.
- ldp x1, x2, [sp, #80]
- .cfi_restore x1
- .cfi_restore x2
-
- ldp x3, x4, [sp, #96]
- .cfi_restore x3
- .cfi_restore x4
-
- ldp x5, x6, [sp, #112]
- .cfi_restore x5
- .cfi_restore x6
+ RESTORE_TWO_REGS x1, x2, 80
+ RESTORE_TWO_REGS x3, x4, 96
+ RESTORE_TWO_REGS x5, x6, 112
// x7, Callee-saves.
- ldp x7, x20, [sp, #128]
- .cfi_restore x7
- .cfi_restore x20
-
- ldp x21, x22, [sp, #144]
- .cfi_restore x21
- .cfi_restore x22
-
- ldp x23, x24, [sp, #160]
- .cfi_restore x23
- .cfi_restore x24
-
- ldp x25, x26, [sp, #176]
- .cfi_restore x25
- .cfi_restore x26
-
- ldp x27, x28, [sp, #192]
- .cfi_restore x27
- .cfi_restore x28
+ RESTORE_TWO_REGS x7, x20, 128
+ RESTORE_TWO_REGS x21, x22, 144
+ RESTORE_TWO_REGS x23, x24, 160
+ RESTORE_TWO_REGS x25, x26, 176
+ RESTORE_TWO_REGS x27, x28, 192
// x29(callee-save) and LR.
- ldp x29, xLR, [sp, #208]
- .cfi_restore x29
- .cfi_restore x30
+ RESTORE_TWO_REGS x29, xLR, 208
add sp, sp, #224
.cfi_adjust_cfa_offset -224
@@ -351,68 +292,22 @@
str d31, [sp, #256]
// Save core registers.
- str x0, [sp, #264]
- .cfi_rel_offset x0, 264
-
- stp x1, x2, [sp, #272]
- .cfi_rel_offset x1, 272
- .cfi_rel_offset x2, 280
-
- stp x3, x4, [sp, #288]
- .cfi_rel_offset x3, 288
- .cfi_rel_offset x4, 296
-
- stp x5, x6, [sp, #304]
- .cfi_rel_offset x5, 304
- .cfi_rel_offset x6, 312
-
- stp x7, x8, [sp, #320]
- .cfi_rel_offset x7, 320
- .cfi_rel_offset x8, 328
-
- stp x9, x10, [sp, #336]
- .cfi_rel_offset x9, 336
- .cfi_rel_offset x10, 344
-
- stp x11, x12, [sp, #352]
- .cfi_rel_offset x11, 352
- .cfi_rel_offset x12, 360
-
- stp x13, x14, [sp, #368]
- .cfi_rel_offset x13, 368
- .cfi_rel_offset x14, 376
-
- stp x15, x16, [sp, #384]
- .cfi_rel_offset x15, 384
- .cfi_rel_offset x16, 392
-
- stp x17, x18, [sp, #400]
- .cfi_rel_offset x17, 400
- .cfi_rel_offset x18, 408
-
- stp x19, x20, [sp, #416]
- .cfi_rel_offset x19, 416
- .cfi_rel_offset x20, 424
-
- stp x21, x22, [sp, #432]
- .cfi_rel_offset x21, 432
- .cfi_rel_offset x22, 440
-
- stp x23, x24, [sp, #448]
- .cfi_rel_offset x23, 448
- .cfi_rel_offset x24, 456
-
- stp x25, x26, [sp, #464]
- .cfi_rel_offset x25, 464
- .cfi_rel_offset x26, 472
-
- stp x27, x28, [sp, #480]
- .cfi_rel_offset x27, 480
- .cfi_rel_offset x28, 488
-
- stp x29, xLR, [sp, #496]
- .cfi_rel_offset x29, 496
- .cfi_rel_offset x30, 504
+ SAVE_REG x0, 264
+ SAVE_TWO_REGS x1, x2, 272
+ SAVE_TWO_REGS x3, x4, 288
+ SAVE_TWO_REGS x5, x6, 304
+ SAVE_TWO_REGS x7, x8, 320
+ SAVE_TWO_REGS x9, x10, 336
+ SAVE_TWO_REGS x11, x12, 352
+ SAVE_TWO_REGS x13, x14, 368
+ SAVE_TWO_REGS x15, x16, 384
+ SAVE_TWO_REGS x17, x18, 400
+ SAVE_TWO_REGS x19, x20, 416
+ SAVE_TWO_REGS x21, x22, 432
+ SAVE_TWO_REGS x23, x24, 448
+ SAVE_TWO_REGS x25, x26, 464
+ SAVE_TWO_REGS x27, x28, 480
+ SAVE_TWO_REGS x29, xLR, 496
// art::Runtime** xIP0 = &art::Runtime::instance_
adrp xIP0, :got:_ZN3art7Runtime9instance_E
@@ -452,68 +347,22 @@
ldr d31, [sp, #256]
// Restore core registers.
- ldr x0, [sp, #264]
- .cfi_restore x0
-
- ldp x1, x2, [sp, #272]
- .cfi_restore x1
- .cfi_restore x2
-
- ldp x3, x4, [sp, #288]
- .cfi_restore x3
- .cfi_restore x4
-
- ldp x5, x6, [sp, #304]
- .cfi_restore x5
- .cfi_restore x6
-
- ldp x7, x8, [sp, #320]
- .cfi_restore x7
- .cfi_restore x8
-
- ldp x9, x10, [sp, #336]
- .cfi_restore x9
- .cfi_restore x10
-
- ldp x11, x12, [sp, #352]
- .cfi_restore x11
- .cfi_restore x12
-
- ldp x13, x14, [sp, #368]
- .cfi_restore x13
- .cfi_restore x14
-
- ldp x15, x16, [sp, #384]
- .cfi_restore x15
- .cfi_restore x16
-
- ldp x17, x18, [sp, #400]
- .cfi_restore x17
- .cfi_restore x18
-
- ldp x19, x20, [sp, #416]
- .cfi_restore x19
- .cfi_restore x20
-
- ldp x21, x22, [sp, #432]
- .cfi_restore x21
- .cfi_restore x22
-
- ldp x23, x24, [sp, #448]
- .cfi_restore x23
- .cfi_restore x24
-
- ldp x25, x26, [sp, #464]
- .cfi_restore x25
- .cfi_restore x26
-
- ldp x27, x28, [sp, #480]
- .cfi_restore x27
- .cfi_restore x28
-
- ldp x29, xLR, [sp, #496]
- .cfi_restore x29
- .cfi_restore x30
+ RESTORE_REG x0, 264
+ RESTORE_TWO_REGS x1, x2, 272
+ RESTORE_TWO_REGS x3, x4, 288
+ RESTORE_TWO_REGS x5, x6, 304
+ RESTORE_TWO_REGS x7, x8, 320
+ RESTORE_TWO_REGS x9, x10, 336
+ RESTORE_TWO_REGS x11, x12, 352
+ RESTORE_TWO_REGS x13, x14, 368
+ RESTORE_TWO_REGS x15, x16, 384
+ RESTORE_TWO_REGS x17, x18, 400
+ RESTORE_TWO_REGS x19, x20, 416
+ RESTORE_TWO_REGS x21, x22, 432
+ RESTORE_TWO_REGS x23, x24, 448
+ RESTORE_TWO_REGS x25, x26, 464
+ RESTORE_TWO_REGS x27, x28, 480
+ RESTORE_TWO_REGS x29, xLR, 496
add sp, sp, #512
.cfi_adjust_cfa_offset -512
@@ -1409,12 +1258,8 @@
ENTRY art_quick_check_cast
// Store arguments and link register
// Stack needs to be 16B aligned on calls.
- stp x0, x1, [sp,#-32]!
- .cfi_adjust_cfa_offset 32
- .cfi_rel_offset x0, 0
- .cfi_rel_offset x1, 8
- str xLR, [sp, #24]
- .cfi_rel_offset x30, 24
+ SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 32
+ SAVE_REG xLR, 24
// Call runtime code
bl artIsAssignableFromCode
@@ -1423,24 +1268,16 @@
cbz x0, .Lthrow_class_cast_exception
// Restore and return
- ldr xLR, [sp, #24]
- .cfi_restore x30
- ldp x0, x1, [sp], #32
- .cfi_restore x0
- .cfi_restore x1
- .cfi_adjust_cfa_offset -32
+ RESTORE_REG xLR, 24
+ RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32
ret
.cfi_adjust_cfa_offset 32 // Reset unwind info so following code unwinds.
.Lthrow_class_cast_exception:
// Restore
- ldr xLR, [sp, #24]
- .cfi_restore x30
- ldp x0, x1, [sp], #32
- .cfi_restore x0
- .cfi_restore x1
- .cfi_adjust_cfa_offset -32
+ RESTORE_REG xLR, 24
+ RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
mov x2, xSELF // pass Thread::Current
@@ -1492,16 +1329,9 @@
#endif
.Lrb_slowpath\number:
// Store registers used in art_quick_aput_obj (x0-x4, LR), stack is 16B aligned.
- stp x0, x1, [sp, #-48]!
- .cfi_adjust_cfa_offset 48
- .cfi_rel_offset x0, 0
- .cfi_rel_offset x1, 8
- stp x2, x3, [sp, #16]
- .cfi_rel_offset x2, 16
- .cfi_rel_offset x3, 24
- stp x4, xLR, [sp, #32]
- .cfi_rel_offset x4, 32
- .cfi_rel_offset x30, 40
+ SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 48
+ SAVE_TWO_REGS x2, x3, 16
+ SAVE_TWO_REGS x4, xLR, 32
// mov x0, \xRef // pass ref in x0 (no-op for now since parameter ref is unused)
.ifnc \xObj, x1
@@ -1520,8 +1350,7 @@
POP_REG_NE x2, 16, \xDest
POP_REG_NE x3, 24, \xDest
POP_REG_NE x4, 32, \xDest
- ldr xLR, [sp, #40]
- .cfi_restore x30
+ RESTORE_REG xLR, 40
add sp, sp, #48
.cfi_adjust_cfa_offset -48
.Lrb_exit\number:
@@ -1587,13 +1416,8 @@
ret
.Lcheck_assignability:
// Store arguments and link register
- stp x0, x1, [sp,#-32]!
- .cfi_adjust_cfa_offset 32
- .cfi_rel_offset x0, 0
- .cfi_rel_offset x1, 8
- stp x2, xLR, [sp, #16]
- .cfi_rel_offset x2, 16
- .cfi_rel_offset x30, 24
+ SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 32
+ SAVE_TWO_REGS x2, xLR, 16
// Call runtime code
mov x0, x3 // Heap reference, 32b, "uncompress" = do nothing, already zero-extended
@@ -1604,13 +1428,8 @@
cbz x0, .Lthrow_array_store_exception
// Restore
- ldp x2, x30, [sp, #16]
- .cfi_restore x2
- .cfi_restore x30
- ldp x0, x1, [sp], #32
- .cfi_restore x0
- .cfi_restore x1
- .cfi_adjust_cfa_offset -32
+ RESTORE_TWO_REGS x2, xLR, 16
+ RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32
add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
// "Compress" = do nothing
@@ -1622,13 +1441,8 @@
ret
.cfi_adjust_cfa_offset 32 // 4 restores after cbz for unwinding.
.Lthrow_array_store_exception:
- ldp x2, x30, [sp, #16]
- .cfi_restore x2
- .cfi_restore x30
- ldp x0, x1, [sp], #32
- .cfi_restore x0
- .cfi_restore x1
- .cfi_adjust_cfa_offset -32
+ RESTORE_TWO_REGS x2, xLR, 16
+ RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
mov x1, x2 // Pass value.
@@ -1796,7 +1610,7 @@
ldr x1, [sp] // load referrer
ldr w2, [x1, #ART_METHOD_DECLARING_CLASS_OFFSET] // load declaring class
ldr x1, [x2, #DECLARING_CLASS_DEX_CACHE_STRINGS_OFFSET] // load string dex cache
- and x2, x0, #STRING_DEX_CACHE_SIZE_MINUS_ONE // get masked string index into x2
+ ubfx x2, x0, #0, #STRING_DEX_CACHE_HASH_BITS // get masked string index into x2
ldr x2, [x1, x2, lsl #STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT] // load dex cache pair into x2
cmp x0, x2, lsr #32 // compare against upper 32 bits
bne .Lart_quick_resolve_string_slow_path
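Note: the ubfx form extracts the low STRING_DEX_CACHE_HASH_BITS bits directly, which matches the old AND against STRING_DEX_CACHE_SIZE_MINUS_ONE as long as the cache size is a power of two. A minimal C++ sketch of that equivalence, with illustrative constants standing in for the real generated values:

#include <cstdint>

// Illustrative constants; the real values come from ART's generated asm headers.
constexpr uint32_t kStringDexCacheHashBits = 10;
constexpr uint32_t kStringDexCacheSize = 1u << kStringDexCacheHashBits;

uint32_t MaskedStringIndex(uint32_t string_idx) {
  // "and x2, x0, #(size - 1)" and "ubfx x2, x0, #0, #hash_bits" compute the
  // same value whenever the cache size is a power of two.
  return string_idx & (kStringDexCacheSize - 1);
}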
@@ -1821,9 +1635,9 @@
ldr x3, [x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
tbnz x3, #LOCK_WORD_MARK_BIT_SHIFT, .Lart_quick_resolve_string_no_rb
// Save LR so that we can return, also x1 for alignment purposes.
- stp x1, xLR, [sp, #-16]! // Save x1, LR.
+ SAVE_TWO_REGS_INCREASE_FRAME x1, xLR, 16 // Save x1, LR.
bl artReadBarrierMark // Get the marked string back.
- ldp x1, xLR, [sp], #16 // Restore registers.
+ RESTORE_TWO_REGS_DECREASE_FRAME x1, xLR, 16 // Restore registers.
.Lart_quick_resolve_string_no_rb:
ret
@@ -1854,47 +1668,27 @@
// Load the class (x2)
ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
cbz x2, .Lart_quick_alloc_object_rosalloc_slow_path // Check null class
- // Check class status.
- ldr w3, [x2, #MIRROR_CLASS_STATUS_OFFSET]
- cmp x3, #MIRROR_CLASS_STATUS_INITIALIZED
- bne .Lart_quick_alloc_object_rosalloc_slow_path
- // Add a fake dependence from the
- // following access flag and size
- // loads to the status load.
- // This is to prevent those loads
- // from being reordered above the
- // status load and reading wrong
- // values (an alternative is to use
- // a load-acquire for the status).
- eor x3, x3, x3
- add x2, x2, x3
- // Check access flags has
- // kAccClassIsFinalizable
- ldr w3, [x2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
- tst x3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE
- bne .Lart_quick_alloc_object_rosalloc_slow_path
ldr x3, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] // Check if the thread local
// allocation stack has room.
// ldp won't work due to large offset.
ldr x4, [xSELF, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
cmp x3, x4
bhs .Lart_quick_alloc_object_rosalloc_slow_path
- ldr w3, [x2, #MIRROR_CLASS_OBJECT_SIZE_OFFSET] // Load the object size (x3)
+ ldr w3, [x2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x3)
cmp x3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread
- // local allocation
+ // local allocation. This also covers the
+ // finalizable and initialization
+ // checks.
bhs .Lart_quick_alloc_object_rosalloc_slow_path
// Compute the rosalloc bracket index
- // from the size.
- // Align up the size by the rosalloc
- // bracket quantum size and divide
- // by the quantum size and subtract
- // by 1. This code is a shorter but
- // equivalent version.
- sub x3, x3, #1
- lsr x3, x3, #ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT
- // Load the rosalloc run (x4)
- add x4, xSELF, x3, lsl #POINTER_SIZE_SHIFT
- ldr x4, [x4, #THREAD_ROSALLOC_RUNS_OFFSET]
+ // from the size. Since the size is
+ // already aligned we can combine the
+ // two shifts together.
+ add x4, xSELF, x3, lsr #(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT)
+ // Subtract pointer size since there
+ // are no runs for 0 byte allocations
+ // and the size is already aligned.
+ ldr x4, [x4, #(THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)]
// Load the free list head (x3). This
// will be the return val.
ldr x3, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
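Note: because the size reaching this point is already a non-zero multiple of the bracket quantum, the old subtract-then-shift and the new shift-then-step-back-one-slot sequences select the same run. A small C++ sketch of the two lookups, with an assumed quantum shift:

#include <cstddef>

// Illustrative constant; the real shift comes from the generated asm headers.
constexpr size_t kRosAllocBracketQuantumSizeShift = 4;  // 16-byte quanta assumed

void* OldRunLookup(void* const* runs, size_t aligned_size) {
  // Old sequence: subtract one, then shift.
  return runs[(aligned_size - 1) >> kRosAllocBracketQuantumSizeShift];
}

void* NewRunLookup(void* const* runs, size_t aligned_size) {
  // New sequence: shift first, then back up one slot. There is no run for
  // 0-byte allocations, and for any non-zero multiple of the quantum
  // (size >> shift) - 1 == (size - 1) >> shift, so both lookups agree.
  return *(runs + (aligned_size >> kRosAllocBracketQuantumSizeShift) - 1);
}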
@@ -1915,11 +1709,11 @@
str w2, [x3, #MIRROR_OBJECT_CLASS_OFFSET]
// Fence. This is "ish" not "ishst" so
// that it also ensures ordering of
- // the class status load with respect
+ // the object size load with respect
// to later accesses to the class
// object. Alternatively we could use
// "ishst" if we use load-acquire for
- // the class status load.)
+ // the object size load.
// Needs to be done before pushing on
// allocation since Heap::VisitObjects
// relies on seeing the class pointer.
@@ -2027,48 +1821,24 @@
ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED \slowPathLabel
.endm
+// TODO: delete ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED since it is the same as
+// ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED.
.macro ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED slowPathLabel
- ldr w3, [x2, #MIRROR_CLASS_STATUS_OFFSET] // Check class status.
- cmp x3, #MIRROR_CLASS_STATUS_INITIALIZED
- bne \slowPathLabel
- // Add a fake dependence from the
- // following access flag and size
- // loads to the status load.
- // This is to prevent those loads
- // from being reordered above the
- // status load and reading wrong
- // values (an alternative is to use
- // a load-acquire for the status).
- eor x3, x3, x3
- add x2, x2, x3
ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED \slowPathLabel
.endm
.macro ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED slowPathLabel
- // Check access flags has
- // kAccClassIsFinalizable.
- ldr w3, [x2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
- tbnz x3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT, \slowPathLabel
- // Load thread_local_pos (x4) and
- // thread_local_end (x5).
ldr x4, [xSELF, #THREAD_LOCAL_POS_OFFSET]
ldr x5, [xSELF, #THREAD_LOCAL_END_OFFSET]
- sub x6, x5, x4 // Compute the remaining buf size.
- ldr w7, [x2, #MIRROR_CLASS_OBJECT_SIZE_OFFSET] // Load the object size (x7).
- cmp x7, x6 // Check if it fits. OK to do this
- // before rounding up the object size
- // assuming the buf size alignment.
+ ldr w7, [x2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x7).
+ add x6, x4, x7 // Add object size to tlab pos.
+ cmp x6, x5 // Check if it fits, overflow works
+ // since the tlab pos and end are 32
+ // bit values.
bhi \slowPathLabel
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
- // Round up the object size by the
- // object alignment. (addr + 7) & ~7.
- add x7, x7, #OBJECT_ALIGNMENT_MASK
- and x7, x7, #OBJECT_ALIGNMENT_MASK_TOGGLED
- // Move old thread_local_pos to x0
- // for the return value.
mov x0, x4
- add x5, x0, x7
- str x5, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
+ str x6, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
ldr x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
add x5, x5, #1
str x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
@@ -2080,7 +1850,7 @@
// the fields of the class.
// Alternatively we could use "ishst"
// if we use load-acquire for the
- // class status load.)
+ // object size load.)
dmb ish
ret
.endm
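Note: the rewritten fast path drops the explicit rounding and the separate remaining-space computation; the size slot already holds an aligned value, so one add and one compare both reserve the object and detect TLAB overflow. A rough C++ sketch of that shape, with assumed field names:

#include <cstdint>

// Field names are assumptions for illustration; the real data lives in
// art::Thread at the THREAD_LOCAL_* offsets used by the assembly.
struct TlabState {
  uintptr_t pos;      // thread_local_pos
  uintptr_t end;      // thread_local_end
  uint64_t objects;   // thread_local_objects
};

// Bump-pointer fast path: the size read from the class is already aligned,
// so a single add-and-compare both places the object and checks that it fits.
void* TlabAllocFastPath(TlabState* tlab, uint32_t aligned_size) {
  uintptr_t new_pos = tlab->pos + aligned_size;
  if (new_pos > tlab->end) {
    return nullptr;  // does not fit: caller takes the slow path
  }
  void* obj = reinterpret_cast<void*>(tlab->pos);
  tlab->pos = new_pos;
  tlab->objects++;
  return obj;
}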
@@ -2107,7 +1877,7 @@
END art_quick_alloc_object_tlab
// The common code for art_quick_alloc_object_*region_tlab
-.macro GENERATE_ALLOC_OBJECT_REGION_TLAB name, entrypoint, fast_path, is_resolved
+.macro GENERATE_ALLOC_OBJECT_REGION_TLAB name, entrypoint, fast_path, is_resolved, read_barrier
ENTRY \name
// Fast path region tlab allocation.
// x0: type_idx/resolved class/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
@@ -2123,31 +1893,34 @@
ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array
// Load the class (x2)
ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
+ // If the class is null, go slow path. The check is required to read the lock word.
+ cbz w2, .Lslow_path\name
.endif
+.if \read_barrier
// Most common case: GC is not marking.
ldr w3, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
cbnz x3, .Lmarking\name
+.endif
.Ldo_allocation\name:
\fast_path .Lslow_path\name
.Lmarking\name:
+.if \read_barrier
// GC is marking, check the lock word of the class for the mark bit.
- // If the class is null, go slow path. The check is required to read the lock word.
- cbz w2, .Lslow_path\name
// Class is not null, check mark bit in lock word.
ldr w3, [x2, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
// If the bit is not zero, do the allocation.
tbnz w3, #LOCK_WORD_MARK_BIT_SHIFT, .Ldo_allocation\name
// The read barrier slow path. Mark
// the class.
- stp x0, x1, [sp, #-32]! // Save registers (x0, x1, lr).
- str xLR, [sp, #16] // Align sp by 16 bytes.
+ SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 32 // Save registers (x0, x1, lr).
+ SAVE_REG xLR, 24 // Align sp by 16 bytes.
mov x0, x2 // Pass the class as the first param.
bl artReadBarrierMark
mov x2, x0 // Get the (marked) class back.
- ldp x0, x1, [sp, #0] // Restore registers.
- ldr xLR, [sp, #16]
- add sp, sp, #32
+ RESTORE_REG xLR, 24
+ RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32 // Restore registers.
b .Ldo_allocation\name
+.endif
.Lslow_path\name:
SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC.
mov x2, xSELF // Pass Thread::Current.
@@ -2157,9 +1930,14 @@
END \name
.endm
-GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_region_tlab, artAllocObjectFromCodeRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH, 0
-GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED, 1
-GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED, 1
+// Use ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED since the null check is already done in GENERATE_ALLOC_OBJECT_REGION_TLAB.
+GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_region_tlab, artAllocObjectFromCodeRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED, 0, 1
+// No read barrier for the resolved or initialized cases since the caller is responsible for the
+// read barrier due to the to-space invariant.
+GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED, 1, 0
+GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED, 1, 0
+
+// TODO: We could use this macro for the normal tlab allocator too.
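Note: the new read_barrier parameter only gates the GC-marking check at macro-expansion time; the resolved and initialized variants are generated without it because their callers already satisfied the to-space invariant. A hedged C++ control-flow sketch, with placeholder helpers:

// Control-flow sketch only: kReadBarrier stands in for the macro's read_barrier
// parameter and every helper below is a placeholder, not an ART function.
static bool ThreadIsGcMarking() { return false; }            // placeholder
static bool LockWordMarkBitSet(void*) { return true; }       // placeholder
static void* ReadBarrierMark(void* klass) { return klass; }  // placeholder
static void* FastPathAllocate(void*) { return nullptr; }     // placeholder
static void* SlowPathAllocate(void*) { return nullptr; }     // placeholder

template <bool kReadBarrier>
void* AllocObjectRegionTlab(void* klass) {
  if (klass == nullptr) {
    return SlowPathAllocate(klass);  // null check must precede the lock word read
  }
  if (kReadBarrier && ThreadIsGcMarking() && !LockWordMarkBitSet(klass)) {
    klass = ReadBarrierMark(klass);  // mark the class, then allocate as usual
  }
  void* obj = FastPathAllocate(klass);
  return obj != nullptr ? obj : SlowPathAllocate(klass);
}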
// The common code for art_quick_alloc_array_*region_tlab
.macro GENERATE_ALLOC_ARRAY_REGION_TLAB name, entrypoint, fast_path, is_resolved
@@ -2524,7 +2302,7 @@
mov xLR, x1 // r1 is holding link register if we're to bounce to deoptimize
ldr d0, [sp, #8] // Restore floating-point result.
- ldr x0, [sp], 16 // Restore integer result, and drop stack area.
+ ldr x0, [sp], #16 // Restore integer result, and drop stack area.
.cfi_adjust_cfa_offset 16
POP_SAVE_REFS_ONLY_FRAME
@@ -2550,7 +2328,7 @@
*/
.extern artDeoptimizeFromCompiledCode
ENTRY art_quick_deoptimize_from_compiled_code
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
+ SETUP_SAVE_EVERYTHING_FRAME
mov x0, xSELF // Pass thread.
bl artDeoptimizeFromCompiledCode // artDeoptimizeFromCompiledCode(Thread*)
brk 0
@@ -2682,37 +2460,16 @@
ret
.Lslow_path_rb_\name:
// Save all potentially live caller-save core registers.
- stp x0, x1, [sp, #-368]!
- .cfi_adjust_cfa_offset 368
- .cfi_rel_offset x0, 0
- .cfi_rel_offset x1, 8
- stp x2, x3, [sp, #16]
- .cfi_rel_offset x2, 16
- .cfi_rel_offset x3, 24
- stp x4, x5, [sp, #32]
- .cfi_rel_offset x4, 32
- .cfi_rel_offset x5, 40
- stp x6, x7, [sp, #48]
- .cfi_rel_offset x6, 48
- .cfi_rel_offset x7, 56
- stp x8, x9, [sp, #64]
- .cfi_rel_offset x8, 64
- .cfi_rel_offset x9, 72
- stp x10, x11, [sp, #80]
- .cfi_rel_offset x10, 80
- .cfi_rel_offset x11, 88
- stp x12, x13, [sp, #96]
- .cfi_rel_offset x12, 96
- .cfi_rel_offset x13, 104
- stp x14, x15, [sp, #112]
- .cfi_rel_offset x14, 112
- .cfi_rel_offset x15, 120
- stp x16, x17, [sp, #128]
- .cfi_rel_offset x16, 128
- .cfi_rel_offset x17, 136
- stp x18, x19, [sp, #144]
- .cfi_rel_offset x18, 144
- .cfi_rel_offset x19, 152
+ SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 368
+ SAVE_TWO_REGS x2, x3, 16
+ SAVE_TWO_REGS x4, x5, 32
+ SAVE_TWO_REGS x6, x7, 48
+ SAVE_TWO_REGS x8, x9, 64
+ SAVE_TWO_REGS x10, x11, 80
+ SAVE_TWO_REGS x12, x13, 96
+ SAVE_TWO_REGS x14, x15, 112
+ SAVE_TWO_REGS x16, x17, 128
+ SAVE_TWO_REGS x18, x19, 144
// Save all potentially live caller-save floating-point registers.
stp d0, d1, [sp, #160]
stp d2, d3, [sp, #176]
@@ -2727,9 +2484,8 @@
stp d28, d29, [sp, #320]
stp d30, d31, [sp, #336]
// Save return address.
- str xLR, [sp, #352]
- .cfi_rel_offset x30, 352
- // (sp + #360 is a padding slot)
+ // (sp + #352 is a padding slot)
+ SAVE_REG xLR, 360
.ifnc \wreg, w0
mov w0, \wreg // Pass arg1 - obj from `wreg`
@@ -2765,8 +2521,7 @@
ldp d28, d29, [sp, #320]
ldp d30, d31, [sp, #336]
// Restore return address and remove padding.
- ldr xLR, [sp, #352]
- .cfi_restore x30
+ RESTORE_REG xLR, 360
add sp, sp, #368
.cfi_adjust_cfa_offset -368
.Lret_rb_\name:
diff --git a/runtime/arch/instruction_set.cc b/runtime/arch/instruction_set.cc
index b35e088..8f64dcd 100644
--- a/runtime/arch/instruction_set.cc
+++ b/runtime/arch/instruction_set.cc
@@ -19,11 +19,31 @@
// Explicitly include our own elf.h to avoid Linux and other dependencies.
#include "../elf.h"
#include "base/bit_utils.h"
+#include "base/logging.h"
#include "globals.h"
namespace art {
-const char* GetInstructionSetString(const InstructionSet isa) {
+void InstructionSetAbort(InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ case kThumb2:
+ case kArm64:
+ case kX86:
+ case kX86_64:
+ case kMips:
+ case kMips64:
+ case kNone:
+ LOG(FATAL) << "Unsupported instruction set " << isa;
+ UNREACHABLE();
+
+ default:
+ LOG(FATAL) << "Unknown ISA " << isa;
+ UNREACHABLE();
+ }
+}
+
+const char* GetInstructionSetString(InstructionSet isa) {
switch (isa) {
case kArm:
case kThumb2:
diff --git a/runtime/arch/instruction_set.h b/runtime/arch/instruction_set.h
index 917acc9..4a8bea4 100644
--- a/runtime/arch/instruction_set.h
+++ b/runtime/arch/instruction_set.h
@@ -21,7 +21,7 @@
#include <string>
#include "base/enums.h"
-#include "base/logging.h" // Logging is required for FATAL in the helper functions.
+#include "base/macros.h"
namespace art {
@@ -75,7 +75,6 @@
// X86 instruction alignment. This is the recommended alignment for maximum performance.
static constexpr size_t kX86Alignment = 16;
-
const char* GetInstructionSetString(InstructionSet isa);
// Note: Returns kNone when the string cannot be parsed to a known value.
@@ -83,6 +82,9 @@
InstructionSet GetInstructionSetFromELF(uint16_t e_machine, uint32_t e_flags);
+// Fatal logging out of line to keep the header clean of logging.h.
+NO_RETURN void InstructionSetAbort(InstructionSet isa);
+
static inline PointerSize GetInstructionSetPointerSize(InstructionSet isa) {
switch (isa) {
case kArm:
@@ -99,12 +101,8 @@
return kMipsPointerSize;
case kMips64:
return kMips64PointerSize;
- case kNone:
- LOG(FATAL) << "ISA kNone does not have pointer size.";
- UNREACHABLE();
default:
- LOG(FATAL) << "Unknown ISA " << isa;
- UNREACHABLE();
+ InstructionSetAbort(isa);
}
}
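Note: moving the FATAL path into an out-of-line NO_RETURN helper keeps logging.h out of the header, while value-returning switches still compile cleanly because the compiler knows the default case never returns. A simplified C++ sketch of the pattern; the names are illustrative, not ART's:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Simplified stand-ins; ART's real helper is InstructionSetAbort and logs via LOG(FATAL).
enum class Isa { kArm64, kX86_64, kNone };

[[noreturn]] void IsaAbort(Isa isa) {
  std::fprintf(stderr, "Unsupported ISA %d\n", static_cast<int>(isa));
  std::abort();
}

inline std::size_t PointerSizeOf(Isa isa) {
  switch (isa) {
    case Isa::kArm64:
    case Isa::kX86_64:
      return 8;
    default:
      IsaAbort(isa);  // [[noreturn]] keeps -Wreturn-type quiet without a dummy return
  }
}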
@@ -139,12 +137,8 @@
case kMips64:
return true;
- case kNone:
- LOG(FATAL) << "ISA kNone does not have bit width.";
- UNREACHABLE();
default:
- LOG(FATAL) << "Unknown ISA " << isa;
- UNREACHABLE();
+ InstructionSetAbort(isa);
}
}
@@ -168,12 +162,9 @@
return 4;
case kMips64:
return 8;
- case kNone:
- LOG(FATAL) << "ISA kNone does not have spills.";
- UNREACHABLE();
+
default:
- LOG(FATAL) << "Unknown ISA " << isa;
- UNREACHABLE();
+ InstructionSetAbort(isa);
}
}
@@ -193,12 +184,9 @@
return 4;
case kMips64:
return 8;
- case kNone:
- LOG(FATAL) << "ISA kNone does not have spills.";
- UNREACHABLE();
+
default:
- LOG(FATAL) << "Unknown ISA " << isa;
- UNREACHABLE();
+ InstructionSetAbort(isa);
}
}
diff --git a/runtime/arch/instruction_set_features_test.cc b/runtime/arch/instruction_set_features_test.cc
index fb38b47..7bbc709 100644
--- a/runtime/arch/instruction_set_features_test.cc
+++ b/runtime/arch/instruction_set_features_test.cc
@@ -22,6 +22,7 @@
#include "cutils/properties.h"
#endif
+#include "base/logging.h"
#include "base/stringprintf.h"
namespace art {
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 09f8849..38aa67c 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include <string.h>
+
#include "atomic.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
diff --git a/runtime/arch/mips/instruction_set_features_mips.h b/runtime/arch/mips/instruction_set_features_mips.h
index 120dc1c..2d54988 100644
--- a/runtime/arch/mips/instruction_set_features_mips.h
+++ b/runtime/arch/mips/instruction_set_features_mips.h
@@ -18,6 +18,8 @@
#define ART_RUNTIME_ARCH_MIPS_INSTRUCTION_SET_FEATURES_MIPS_H_
#include "arch/instruction_set_features.h"
+#include "base/logging.h"
+#include "base/macros.h"
namespace art {
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 3d393f6..4bd1314 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -2094,7 +2094,7 @@
*/
.extern artDeoptimizeFromCompiledCode
ENTRY art_quick_deoptimize_from_compiled_code
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
+ SETUP_SAVE_EVERYTHING_FRAME
la $t9, artDeoptimizeFromCompiledCode
jalr $t9 # artDeoptimizeFromCompiledCode(Thread*)
# Returns caller method's frame size.
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index 34b0638..a037905 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include <string.h>
+
#include "atomic.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 9774eb9..0bf2a35 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -2138,8 +2138,8 @@
* will long jump to the upcall with a special exception of -1.
*/
.extern artDeoptimizeFromCompiledCode
-ENTRY art_quick_deoptimize_from_compiled_code
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
+ENTRY_NO_GP art_quick_deoptimize_from_compiled_code
+ SETUP_SAVE_EVERYTHING_FRAME
jal artDeoptimizeFromCompiledCode # artDeoptimizeFromCompiledCode(Thread*, SP)
# Returns caller method's frame size.
move $a0, rSELF # pass Thread::current
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 10adb3a..507dbf0 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -1287,7 +1287,7 @@
static void GetSetBooleanStatic(ArtField* f, Thread* self,
ArtMethod* referrer, StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
constexpr size_t num_values = 5;
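Note: SHARED_REQUIRES is renamed to REQUIRES_SHARED throughout, following the spelling used by Clang's thread-safety analysis. A hedged sketch of how such a macro is conventionally defined; ART's actual definition may differ:

// Conventional Clang thread-safety spelling; ART's actual macro may differ.
#if defined(__clang__)
#define REQUIRES_SHARED(...) \
  __attribute__((requires_shared_capability(__VA_ARGS__)))
#else
#define REQUIRES_SHARED(...)
#endif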
@@ -1318,7 +1318,7 @@
}
static void GetSetByteStatic(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
int8_t values[] = { -128, -64, 0, 64, 127 };
@@ -1349,7 +1349,7 @@
static void GetSetBooleanInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self,
ArtMethod* referrer, StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
uint8_t values[] = { 0, true, 2, 128, 0xFF };
@@ -1384,7 +1384,7 @@
}
static void GetSetByteInstance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, ArtMethod* referrer, StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
int8_t values[] = { -128, -64, 0, 64, 127 };
@@ -1419,7 +1419,7 @@
static void GetSetCharStatic(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };
@@ -1449,7 +1449,7 @@
}
static void GetSetShortStatic(ArtField* f, Thread* self,
ArtMethod* referrer, StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
@@ -1480,7 +1480,7 @@
static void GetSetCharInstance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, ArtMethod* referrer, StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };
@@ -1514,7 +1514,7 @@
}
static void GetSetShortInstance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, ArtMethod* referrer, StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
@@ -1549,7 +1549,7 @@
static void GetSet32Static(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
@@ -1585,7 +1585,7 @@
static void GetSet32Instance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, ArtMethod* referrer, StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
@@ -1626,7 +1626,7 @@
static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* self,
ArtMethod* referrer, StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
reinterpret_cast<size_t>(val),
0U,
@@ -1646,7 +1646,7 @@
static void GetSetObjStatic(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
set_and_check_static(f->GetDexFieldIndex(), nullptr, self, referrer, test);
@@ -1670,7 +1670,7 @@
static void set_and_check_instance(ArtField* f, mirror::Object* trg,
mirror::Object* val, Thread* self, ArtMethod* referrer,
StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
reinterpret_cast<size_t>(trg),
reinterpret_cast<size_t>(val),
@@ -1693,7 +1693,7 @@
static void GetSetObjInstance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, ArtMethod* referrer, StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);
@@ -1716,7 +1716,7 @@
static void GetSet64Static(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) \
|| defined(__aarch64__)
uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
@@ -1749,7 +1749,7 @@
static void GetSet64Instance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, ArtMethod* referrer, StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
defined(__aarch64__)
uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index bdf11da..0a10a3c 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include <math.h>
+
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 282f10d..646a80c 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -897,17 +897,6 @@
movl 0(%edx, %eax, COMPRESSED_REFERENCE_SIZE), %edx
testl %edx, %edx // Check null class
jz .Lart_quick_alloc_object_rosalloc_slow_path
- // Check class status
- cmpl LITERAL(MIRROR_CLASS_STATUS_INITIALIZED), MIRROR_CLASS_STATUS_OFFSET(%edx)
- jne .Lart_quick_alloc_object_rosalloc_slow_path
- // No fake dependence needed on x86
- // between status and flags load,
- // since each load is a load-acquire,
- // no loads reordering.
- // Check access flags has
- // kAccClassIsFinalizable
- testl LITERAL(ACCESS_FLAGS_CLASS_IS_FINALIZABLE), MIRROR_CLASS_ACCESS_FLAGS_OFFSET(%edx)
- jnz .Lart_quick_alloc_object_rosalloc_slow_path
movl %fs:THREAD_SELF_OFFSET, %ebx // ebx = thread
// Check if the thread local allocation
@@ -916,21 +905,19 @@
cmpl THREAD_LOCAL_ALLOC_STACK_END_OFFSET(%ebx), %edi
jae .Lart_quick_alloc_object_rosalloc_slow_path
- movl MIRROR_CLASS_OBJECT_SIZE_OFFSET(%edx), %edi // Load the object size (edi)
+ movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%edx), %edi // Load the object size (edi)
// Check if the size is for a thread
- // local allocation
+ // local allocation. Also does the
+ // finalizable and initialization check.
cmpl LITERAL(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), %edi
ja .Lart_quick_alloc_object_rosalloc_slow_path
- decl %edi
shrl LITERAL(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), %edi // Calculate the rosalloc bracket index
// from object size.
- // Align up the size by the rosalloc
- // bracket quantum size and divide
- // by the quantum size and subtract
- // by 1. This code is a shorter but
- // equivalent version.
// Load thread local rosalloc run (ebx)
- movl THREAD_ROSALLOC_RUNS_OFFSET(%ebx, %edi, __SIZEOF_POINTER__), %ebx
+ // Subtract __SIZEOF_POINTER__ to subtract
+ // one from edi as there is no 0 byte run
+ // and the size is already aligned.
+ movl (THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)(%ebx, %edi, __SIZEOF_POINTER__), %ebx
// Load free_list head (edi),
// this will be the return value.
movl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%ebx), %edi
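Note: loading MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET lets a single unsigned comparison stand in for the removed status and finalizable checks, presumably because classes that fail either check keep an oversized sentinel in that slot. A hedged C++ sketch of that idea; the sentinel scheme is an assumption for illustration:

#include <cstdint>
#include <limits>

// Hedged sketch: classes that are finalizable or not yet initialized are assumed
// to keep an oversized sentinel in the fast-path size slot, so the one unsigned
// comparison below also rejects them.
constexpr uint32_t kMaxThreadLocalBracketSize = 128;  // illustrative value

uint32_t FastPathSizeSlot(bool initialized, bool finalizable, uint32_t object_size) {
  return (initialized && !finalizable) ? object_size
                                       : std::numeric_limits<uint32_t>::max();
}

bool TakesRosAllocFastPath(uint32_t fast_path_size_slot) {
  // Covers "too big for a thread-local bracket", "finalizable" and
  // "not initialized" with a single compare.
  return fast_path_size_slot <= kMaxThreadLocalBracketSize;
}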
@@ -990,32 +977,17 @@
MACRO1(ALLOC_OBJECT_TLAB_FAST_PATH, slowPathLabel)
testl %edx, %edx // Check null class
jz VAR(slowPathLabel)
- // Check class status.
- cmpl LITERAL(MIRROR_CLASS_STATUS_INITIALIZED), MIRROR_CLASS_STATUS_OFFSET(%edx)
- jne VAR(slowPathLabel)
- // No fake dependence needed on x86
- // between status and flags load,
- // since each load is a load-acquire,
- // no loads reordering.
- // Check access flags has
- // kAccClassIsFinalizable
- testl LITERAL(ACCESS_FLAGS_CLASS_IS_FINALIZABLE), MIRROR_CLASS_ACCESS_FLAGS_OFFSET(%edx)
- jnz VAR(slowPathLabel)
movl %fs:THREAD_SELF_OFFSET, %ebx // ebx = thread
movl THREAD_LOCAL_END_OFFSET(%ebx), %edi // Load thread_local_end.
subl THREAD_LOCAL_POS_OFFSET(%ebx), %edi // Compute the remaining buffer size.
- movl MIRROR_CLASS_OBJECT_SIZE_OFFSET(%edx), %esi // Load the object size.
- cmpl %edi, %esi // Check if it fits. OK to do this
- // before rounding up the object size
- // assuming the buf size alignment.
+ movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%edx), %esi // Load the object size.
+ cmpl %edi, %esi // Check if it fits.
ja VAR(slowPathLabel)
- addl LITERAL(OBJECT_ALIGNMENT_MASK), %esi // Align the size by 8. (addr + 7) & ~7.
- andl LITERAL(OBJECT_ALIGNMENT_MASK_TOGGLED), %esi
movl THREAD_LOCAL_POS_OFFSET(%ebx), %eax // Load thread_local_pos
// as allocated object.
addl %eax, %esi // Add the object size.
movl %esi, THREAD_LOCAL_POS_OFFSET(%ebx) // Update thread_local_pos.
- addl LITERAL(1), THREAD_LOCAL_OBJECTS_OFFSET(%ebx) // Increase thread_local_objects.
+ incl THREAD_LOCAL_OBJECTS_OFFSET(%ebx) // Increase thread_local_objects.
// Store the class pointer in the header.
// No fence needed for x86.
POISON_HEAP_REF edx
@@ -1109,18 +1081,15 @@
END_FUNCTION art_quick_alloc_object_region_tlab
DEFINE_FUNCTION art_quick_resolve_string
- SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx
- movl FRAME_SIZE_SAVE_REFS_ONLY(%esp), %ecx // get referrer
+ movl 4(%esp), %ecx // get referrer
movl ART_METHOD_DECLARING_CLASS_OFFSET(%ecx), %ecx // get declaring class
movl DECLARING_CLASS_DEX_CACHE_STRINGS_OFFSET(%ecx), %ecx // get string dex cache
movl LITERAL(STRING_DEX_CACHE_SIZE_MINUS_ONE), %edx
andl %eax, %edx
- shl LITERAL(STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT), %edx
- addl %ecx, %edx
- movlps (%edx), %xmm0 // load string idx and pointer to xmm0
- movd %xmm0, %ecx // extract pointer
- pshufd LITERAL(0x55), %xmm0, %xmm0 // shuffle index into lowest bits
- movd %xmm0, %edx // extract index
+ movlps (%ecx, %edx, STRING_DEX_CACHE_ELEMENT_SIZE), %xmm0 // load string idx and ptr to xmm0
+ movd %xmm0, %ecx // extract pointer
+ pshufd LITERAL(0x55), %xmm0, %xmm0 // shuffle index into lowest bits
+ movd %xmm0, %edx // extract index
cmp %edx, %eax
jne .Lart_quick_resolve_string_slow_path
movl %ecx, %eax
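Note: the fast path reads one packed 64-bit dex-cache entry and splits it with movd/pshufd: the low half holds the string pointer and the high half the string index used as a tag. A hedged C++ sketch of that lookup; the struct layout and names are assumptions for illustration:

#include <cstdint>

// Hedged sketch of the packed dex-cache entry the fast path reads. The layout
// (pointer in the low half, string index in the high half) matches what the
// movd/pshufd sequence extracts, but the struct itself is an illustration.
struct StringDexCachePair {
  uint32_t string_ptr;  // compressed reference to the resolved string
  uint32_t string_idx;  // dex string index stored as a tag
};

uint32_t LookupStringFastPath(const StringDexCachePair* cache,
                              uint32_t string_idx,
                              uint32_t size_minus_one) {
  const StringDexCachePair& entry = cache[string_idx & size_minus_one];
  // A tag mismatch means the slot holds a different string: take the slow path.
  return (entry.string_idx == string_idx) ? entry.string_ptr : 0u;
}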
@@ -1128,10 +1097,10 @@
cmpl LITERAL(0), %fs:THREAD_IS_GC_MARKING_OFFSET
jne .Lart_quick_resolve_string_marking
#endif
- RESTORE_SAVE_REFS_ONLY_FRAME
ret
.Lart_quick_resolve_string_slow_path:
// Outgoing argument set up
+ SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx
subl LITERAL(8), %esp // push padding
CFI_ADJUST_CFA_OFFSET(8)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
@@ -1143,6 +1112,7 @@
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
.Lart_quick_resolve_string_marking:
+ SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx
testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%eax)
jnz .Lart_quick_resolve_string_no_rb
subl LITERAL(12), %esp // alignment padding
@@ -1960,7 +1930,7 @@
* will long jump to the interpreter bridge.
*/
DEFINE_FUNCTION art_quick_deoptimize_from_compiled_code
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME ebx, ebx
+ SETUP_SAVE_EVERYTHING_FRAME ebx, ebx
subl LITERAL(12), %esp // Align stack.
CFI_ADJUST_CFA_OFFSET(12)
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index 42b9699..8c425d5 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include <math.h>
+
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index f941c52..5ea58af 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -935,17 +935,6 @@
movl 0(%rdx, %rdi, COMPRESSED_REFERENCE_SIZE), %edx
testl %edx, %edx // Check null class
jz .Lart_quick_alloc_object_rosalloc_slow_path
- // Check class status.
- cmpl LITERAL(MIRROR_CLASS_STATUS_INITIALIZED), MIRROR_CLASS_STATUS_OFFSET(%rdx)
- jne .Lart_quick_alloc_object_rosalloc_slow_path
- // We don't need a fence (between the
- // the status and the access flag
- // loads) here because every load is
- // a load acquire on x86.
- // Check access flags has
- // kAccClassIsFinalizable
- testl LITERAL(ACCESS_FLAGS_CLASS_IS_FINALIZABLE), MIRROR_CLASS_ACCESS_FLAGS_OFFSET(%rdx)
- jnz .Lart_quick_alloc_object_rosalloc_slow_path
// Check if the thread local
// allocation stack has room.
movq %gs:THREAD_SELF_OFFSET, %r8 // r8 = thread
@@ -953,22 +942,21 @@
cmpq THREAD_LOCAL_ALLOC_STACK_END_OFFSET(%r8), %rcx
jae .Lart_quick_alloc_object_rosalloc_slow_path
// Load the object size
- movl MIRROR_CLASS_OBJECT_SIZE_OFFSET(%rdx), %eax
+ movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%rdx), %eax
// Check if the size is for a thread
- // local allocation
+ // local allocation. Also does the
+ // initialized and finalizable checks.
cmpl LITERAL(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), %eax
ja .Lart_quick_alloc_object_rosalloc_slow_path
// Compute the rosalloc bracket index
// from the size.
- // Align up the size by the rosalloc
- // bracket quantum size and divide
- // by the quantum size and subtract
- // by 1. This code is a shorter but
- // equivalent version.
- subq LITERAL(1), %rax
shrq LITERAL(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), %rax
// Load the rosalloc run (r9)
- movq THREAD_ROSALLOC_RUNS_OFFSET(%r8, %rax, __SIZEOF_POINTER__), %r9
+ // Subtract __SIZEOF_POINTER__ to
+ // subtract one from rax as there is no
+ // 0 byte run and the size is already
+ // aligned.
+ movq (THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)(%r8, %rax, __SIZEOF_POINTER__), %r9
// Load the free list head (rax). This
// will be the return val.
movq (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%r9), %rax
@@ -1020,21 +1008,12 @@
END_MACRO
// The common fast path code for art_quick_alloc_object_resolved_region_tlab.
+// TODO: delete ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH since it is the same as
+// ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH.
//
// RDI: type_idx, RSI: ArtMethod*, RDX/EDX: the class, RAX: return value.
// RCX: scratch, r8: Thread::Current().
MACRO1(ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH, slowPathLabel)
- // Check class status.
- cmpl LITERAL(MIRROR_CLASS_STATUS_INITIALIZED), MIRROR_CLASS_STATUS_OFFSET(%rdx)
- jne RAW_VAR(slowPathLabel)
- // No fake dependence needed on x86
- // between status and flags load,
- // since each load is a load-acquire,
- // no loads reordering.
- // Check access flags has
- // kAccClassIsFinalizable
- testl LITERAL(ACCESS_FLAGS_CLASS_IS_FINALIZABLE), MIRROR_CLASS_ACCESS_FLAGS_OFFSET(%rdx)
- jnz RAW_VAR(slowPathLabel)
ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH(RAW_VAR(slowPathLabel))
END_MACRO
@@ -1044,19 +1023,16 @@
// RCX: scratch, r8: Thread::Current().
MACRO1(ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH, slowPathLabel)
movq %gs:THREAD_SELF_OFFSET, %r8 // r8 = thread
- movl MIRROR_CLASS_OBJECT_SIZE_OFFSET(%rdx), %ecx // Load the object size.
+ movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%rdx), %ecx // Load the object size.
movq THREAD_LOCAL_POS_OFFSET(%r8), %rax
- leaq OBJECT_ALIGNMENT_MASK(%rax, %rcx), %rcx // Add size to pos, note that these
+ addq %rax, %rcx // Add size to pos, note that these
// are both 32 bit ints, overflow
// will cause the add to be past the
// end of the thread local region.
- // Also sneak in alignment mask add.
- andq LITERAL(OBJECT_ALIGNMENT_MASK_TOGGLED64), %rcx // Align the size by 8. (addr + 7) &
- // ~7.
cmpq THREAD_LOCAL_END_OFFSET(%r8), %rcx // Check if it fits.
ja RAW_VAR(slowPathLabel)
movq %rcx, THREAD_LOCAL_POS_OFFSET(%r8) // Update thread_local_pos.
- addq LITERAL(1), THREAD_LOCAL_OBJECTS_OFFSET(%r8) // Increase thread_local_objects.
+ incq THREAD_LOCAL_OBJECTS_OFFSET(%r8) // Increase thread_local_objects.
// Store the class pointer in the
// header.
// No fence needed for x86.
@@ -1268,28 +1244,9 @@
int3
int3
#endif
+ // No read barrier since the caller is responsible for that.
movq %rdi, %rdx
- cmpl LITERAL(0), %gs:THREAD_IS_GC_MARKING_OFFSET
- jne .Lart_quick_alloc_object_resolved_region_tlab_class_load_read_barrier_marking
-.Lart_quick_alloc_object_resolved_region_tlab_class_load_read_barrier_slow_path_exit:
ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_region_tlab_slow_path
-.Lart_quick_alloc_object_resolved_region_tlab_class_load_read_barrier_marking:
- // Check the mark bit, if it is 1 avoid the read barrier.
- testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%edx)
- jnz .Lart_quick_alloc_object_resolved_region_tlab_class_load_read_barrier_slow_path_exit
-.Lart_quick_alloc_object_resolved_region_tlab_class_load_read_barrier_slow_path:
- // The read barrier slow path. Mark the class.
- PUSH rdi
- PUSH rsi
- subq LITERAL(8), %rsp // 16 byte alignment
- // Outgoing argument set up
- movq %rdx, %rdi // Pass the class as the first param.
- call SYMBOL(artReadBarrierMark) // cxx_name(mirror::Object* obj)
- movq %rax, %rdx
- addq LITERAL(8), %rsp
- POP rsi
- POP rdi
- jmp .Lart_quick_alloc_object_resolved_region_tlab_class_load_read_barrier_slow_path_exit
.Lart_quick_alloc_object_resolved_region_tlab_slow_path:
ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeResolvedRegionTLAB
END_FUNCTION art_quick_alloc_object_resolved_region_tlab
@@ -1303,29 +1260,9 @@
int3
int3
#endif
- // Might need a special macro since rsi and edx is 32b/64b mismatched.
movq %rdi, %rdx
- cmpl LITERAL(0), %gs:THREAD_IS_GC_MARKING_OFFSET
- jne .Lart_quick_alloc_object_initialized_region_tlab_class_load_read_barrier_marking
-.Lart_quick_alloc_object_initialized_region_tlab_class_load_read_barrier_slow_path_exit:
+ // No read barrier since the caller is responsible for that.
ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH .Lart_quick_alloc_object_initialized_region_tlab_slow_path
-.Lart_quick_alloc_object_initialized_region_tlab_class_load_read_barrier_marking:
- // Check the mark bit, if it is 1 avoid the read barrier.
- testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%edx)
- jnz .Lart_quick_alloc_object_initialized_region_tlab_class_load_read_barrier_slow_path
-.Lart_quick_alloc_object_initialized_region_tlab_class_load_read_barrier_slow_path:
- // The read barrier slow path. Mark the class.
- PUSH rdi
- PUSH rsi
- subq LITERAL(8), %rsp // 16 byte alignment
- // Outgoing argument set up
- movq %rdx, %rdi // Pass the class as the first param.
- call SYMBOL(artReadBarrierMark) // cxx_name(mirror::Object* obj)
- movq %rax, %rdx
- addq LITERAL(8), %rsp
- POP rsi
- POP rdi
- jmp .Lart_quick_alloc_object_initialized_region_tlab_class_load_read_barrier_slow_path_exit
.Lart_quick_alloc_object_initialized_region_tlab_slow_path:
ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeInitializedRegionTLAB
END_FUNCTION art_quick_alloc_object_initialized_region_tlab
@@ -2116,7 +2053,7 @@
* will long jump to the interpreter bridge.
*/
DEFINE_FUNCTION art_quick_deoptimize_from_compiled_code
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
+ SETUP_SAVE_EVERYTHING_FRAME
// Stack should be aligned now.
movq %gs:THREAD_SELF_OFFSET, %rdi // Pass Thread.
call SYMBOL(artDeoptimizeFromCompiledCode) // artDeoptimizeFromCompiledCode(Thread*)
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index a102858..ef75f94 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -254,7 +254,7 @@
SetObj<kTransactionActive>(object, l);
}
-inline const char* ArtField::GetName() SHARED_REQUIRES(Locks::mutator_lock_) {
+inline const char* ArtField::GetName() REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t field_index = GetDexFieldIndex();
if (UNLIKELY(GetDeclaringClass()->IsProxyClass())) {
DCHECK(IsStatic());
@@ -265,7 +265,7 @@
return dex_file->GetFieldName(dex_file->GetFieldId(field_index));
}
-inline const char* ArtField::GetTypeDescriptor() SHARED_REQUIRES(Locks::mutator_lock_) {
+inline const char* ArtField::GetTypeDescriptor() REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t field_index = GetDexFieldIndex();
if (UNLIKELY(GetDeclaringClass()->IsProxyClass())) {
DCHECK(IsStatic());
@@ -279,11 +279,11 @@
}
inline Primitive::Type ArtField::GetTypeAsPrimitiveType()
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return Primitive::GetType(GetTypeDescriptor()[0]);
}
-inline bool ArtField::IsPrimitiveType() SHARED_REQUIRES(Locks::mutator_lock_) {
+inline bool ArtField::IsPrimitiveType() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetTypeAsPrimitiveType() != Primitive::kPrimNot;
}
@@ -305,15 +305,15 @@
return type;
}
-inline size_t ArtField::FieldSize() SHARED_REQUIRES(Locks::mutator_lock_) {
+inline size_t ArtField::FieldSize() REQUIRES_SHARED(Locks::mutator_lock_) {
return Primitive::ComponentSize(GetTypeAsPrimitiveType());
}
-inline mirror::DexCache* ArtField::GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_) {
+inline mirror::DexCache* ArtField::GetDexCache() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetDeclaringClass()->GetDexCache();
}
-inline const DexFile* ArtField::GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_) {
+inline const DexFile* ArtField::GetDexFile() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetDexCache()->GetDexFile();
}
@@ -349,7 +349,7 @@
template <bool kExactOffset>
static inline ArtField* FindFieldWithOffset(
const IterationRange<StrideIterator<ArtField>>& fields,
- uint32_t field_offset) SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t field_offset) REQUIRES_SHARED(Locks::mutator_lock_) {
for (ArtField& field : fields) {
if (kExactOffset) {
if (field.GetOffset().Uint32Value() == field_offset) {
diff --git a/runtime/art_field.h b/runtime/art_field.h
index aaccbf3..16e6c75 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -42,27 +42,27 @@
ArtField();
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Class* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
void SetDeclaringClass(mirror::Class *new_declaring_class)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_);
+ uint32_t GetAccessFlags() REQUIRES_SHARED(Locks::mutator_lock_);
- void SetAccessFlags(uint32_t new_access_flags) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetAccessFlags(uint32_t new_access_flags) REQUIRES_SHARED(Locks::mutator_lock_) {
// Not called within a transaction.
access_flags_ = new_access_flags;
}
- bool IsPublic() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPublic() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccPublic) != 0;
}
- bool IsStatic() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsStatic() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccStatic) != 0;
}
- bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsFinal() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccFinal) != 0;
}
@@ -76,86 +76,86 @@
}
// Offset to field within an Object.
- MemberOffset GetOffset() SHARED_REQUIRES(Locks::mutator_lock_);
+ MemberOffset GetOffset() REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset OffsetOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtField, offset_));
}
- MemberOffset GetOffsetDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_);
+ MemberOffset GetOffsetDuringLinking() REQUIRES_SHARED(Locks::mutator_lock_);
- void SetOffset(MemberOffset num_bytes) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetOffset(MemberOffset num_bytes) REQUIRES_SHARED(Locks::mutator_lock_);
// field access, null object for static fields
- uint8_t GetBoolean(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint8_t GetBoolean(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetBoolean(mirror::Object* object, uint8_t z) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetBoolean(mirror::Object* object, uint8_t z) REQUIRES_SHARED(Locks::mutator_lock_);
- int8_t GetByte(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ int8_t GetByte(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetByte(mirror::Object* object, int8_t b) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetByte(mirror::Object* object, int8_t b) REQUIRES_SHARED(Locks::mutator_lock_);
- uint16_t GetChar(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint16_t GetChar(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetChar(mirror::Object* object, uint16_t c) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetChar(mirror::Object* object, uint16_t c) REQUIRES_SHARED(Locks::mutator_lock_);
- int16_t GetShort(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ int16_t GetShort(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetShort(mirror::Object* object, int16_t s) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetShort(mirror::Object* object, int16_t s) REQUIRES_SHARED(Locks::mutator_lock_);
- int32_t GetInt(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t GetInt(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetInt(mirror::Object* object, int32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetInt(mirror::Object* object, int32_t i) REQUIRES_SHARED(Locks::mutator_lock_);
- int64_t GetLong(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ int64_t GetLong(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetLong(mirror::Object* object, int64_t j) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetLong(mirror::Object* object, int64_t j) REQUIRES_SHARED(Locks::mutator_lock_);
- float GetFloat(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ float GetFloat(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetFloat(mirror::Object* object, float f) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetFloat(mirror::Object* object, float f) REQUIRES_SHARED(Locks::mutator_lock_);
- double GetDouble(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ double GetDouble(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetDouble(mirror::Object* object, double d) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetDouble(mirror::Object* object, double d) REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::Object* GetObject(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Object* GetObject(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
void SetObject(mirror::Object* object, mirror::Object* l)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Raw field accesses.
- uint32_t Get32(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint32_t Get32(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
void Set32(mirror::Object* object, uint32_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- uint64_t Get64(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint64_t Get64(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void Set64(mirror::Object* object, uint64_t new_value) SHARED_REQUIRES(Locks::mutator_lock_);
+ void Set64(mirror::Object* object, uint64_t new_value) REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::Object* GetObj(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Object* GetObj(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
void SetObj(mirror::Object* object, mirror::Object* new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
template<typename RootVisitorType>
void VisitRoots(RootVisitorType& visitor) NO_THREAD_SAFETY_ANALYSIS;
- bool IsVolatile() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsVolatile() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccVolatile) != 0;
}
@@ -164,35 +164,35 @@
// offset.
template <bool kExactOffset = true>
static ArtField* FindInstanceFieldWithOffset(mirror::Class* klass, uint32_t field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns a static field with this offset in the given class or null if not found.
// If kExactOffset is true then we only find the matching offset, not the field containing the
// offset.
template <bool kExactOffset = true>
static ArtField* FindStaticFieldWithOffset(mirror::Class* klass, uint32_t field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- const char* GetName() SHARED_REQUIRES(Locks::mutator_lock_);
+ const char* GetName() REQUIRES_SHARED(Locks::mutator_lock_);
// Resolves / returns the name from the dex cache.
mirror::String* GetStringName(Thread* self, bool resolve)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- const char* GetTypeDescriptor() SHARED_REQUIRES(Locks::mutator_lock_);
+ const char* GetTypeDescriptor() REQUIRES_SHARED(Locks::mutator_lock_);
- Primitive::Type GetTypeAsPrimitiveType() SHARED_REQUIRES(Locks::mutator_lock_);
+ Primitive::Type GetTypeAsPrimitiveType() REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsPrimitiveType() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsPrimitiveType() REQUIRES_SHARED(Locks::mutator_lock_);
template <bool kResolve>
- mirror::Class* GetType() SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Class* GetType() REQUIRES_SHARED(Locks::mutator_lock_);
- size_t FieldSize() SHARED_REQUIRES(Locks::mutator_lock_);
+ size_t FieldSize() REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::DexCache* GetDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
- const DexFile* GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_);
+ const DexFile* GetDexFile() REQUIRES_SHARED(Locks::mutator_lock_);
GcRoot<mirror::Class>& DeclaringClassRoot() {
return declaring_class_;
@@ -201,15 +201,15 @@
// Update the declaring class with the passed in visitor. Does not use read barrier.
template <typename Visitor>
ALWAYS_INLINE void UpdateObjects(const Visitor& visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
mirror::Class* ProxyFindSystemClass(const char* descriptor)
- SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::Class* ResolveGetType(uint32_t type_idx) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ mirror::Class* ResolveGetType(uint32_t type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
mirror::String* ResolveGetStringName(Thread* self, const DexFile& dex_file, uint32_t string_idx,
mirror::DexCache* dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
GcRoot<mirror::Class> declaring_class_;
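
The SHARED_REQUIRES to REQUIRES_SHARED rename running through these headers is mechanical: both names wrap clang's requires_shared_capability attribute, with the new spelling supplied by android-base's thread_annotations.h (see the runtime/base/macros.h hunk further down). A minimal sketch of what the annotation enforces under clang -Wthread-safety; FakeLock, ReadCounter and REQUIRES_SHARED_ are illustrative stand-ins, not ART code:

#define REQUIRES_SHARED_(...) \
  __attribute__((requires_shared_capability(__VA_ARGS__)))

class __attribute__((capability("mutex"))) FakeLock {
 public:
  void ReaderLock() __attribute__((acquire_shared_capability)) {}
  void ReaderUnlock() __attribute__((release_shared_capability)) {}
};

FakeLock fake_lock;
int counter = 0;

// Callers must hold fake_lock at least for reading.
int ReadCounter() REQUIRES_SHARED_(fake_lock) { return counter; }

int Ok() {
  fake_lock.ReaderLock();
  int v = ReadCounter();   // OK: shared capability is held here.
  fake_lock.ReaderUnlock();
  return v;
}

int Bad() {
  return ReadCounter();    // clang -Wthread-safety warns: requires holding 'fake_lock'.
}
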
diff --git a/runtime/art_method.h b/runtime/art_method.h
index a90ef23..b25087c 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -29,7 +29,6 @@
#include "modifiers.h"
#include "mirror/object.h"
#include "read_barrier_option.h"
-#include "stack.h"
#include "utils.h"
namespace art {
@@ -228,20 +227,20 @@
static ArtMethod* FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa,
jobject jlr_method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ALWAYS_INLINE mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE mirror::Class* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE mirror::Class* GetDeclaringClassUnchecked()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetDeclaringClass(mirror::Class *new_declaring_class)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool CASDeclaringClass(mirror::Class* expected_class, mirror::Class* desired_class)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset DeclaringClassOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, declaring_class_));
@@ -258,7 +257,7 @@
}
// Approximate what kind of method call would be used for this method.
- InvokeType GetInvokeType() SHARED_REQUIRES(Locks::mutator_lock_);
+ InvokeType GetInvokeType() REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if the method is declared public.
bool IsPublic() {
@@ -358,7 +357,7 @@
}
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsProxyMethod() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsProxyMethod() REQUIRES_SHARED(Locks::mutator_lock_);
bool SkipAccessChecks() {
return (GetAccessFlags() & kAccSkipAccessChecks) != 0;
@@ -380,24 +379,24 @@
bool IsAnnotatedWithFastNative();
// Returns true if this method could be overridden by a default method.
- bool IsOverridableByDefaultMethod() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsOverridableByDefaultMethod() REQUIRES_SHARED(Locks::mutator_lock_);
- bool CheckIncompatibleClassChange(InvokeType type) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool CheckIncompatibleClassChange(InvokeType type) REQUIRES_SHARED(Locks::mutator_lock_);
// Throws the error that would result from trying to invoke this method (i.e.
// IncompatibleClassChangeError or AbstractMethodError). Only call if !IsInvokable();
- void ThrowInvocationTimeError() SHARED_REQUIRES(Locks::mutator_lock_);
+ void ThrowInvocationTimeError() REQUIRES_SHARED(Locks::mutator_lock_);
- uint16_t GetMethodIndex() SHARED_REQUIRES(Locks::mutator_lock_);
+ uint16_t GetMethodIndex() REQUIRES_SHARED(Locks::mutator_lock_);
// Doesn't do erroneous / unresolved class checks.
- uint16_t GetMethodIndexDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_);
+ uint16_t GetMethodIndexDuringLinking() REQUIRES_SHARED(Locks::mutator_lock_);
- size_t GetVtableIndex() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t GetVtableIndex() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetMethodIndex();
}
- void SetMethodIndex(uint16_t new_method_index) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetMethodIndex(uint16_t new_method_index) REQUIRES_SHARED(Locks::mutator_lock_) {
// Not called within a transaction.
method_index_ = new_method_index;
}
@@ -422,9 +421,9 @@
// Number of 32bit registers that would be required to hold all the arguments
static size_t NumArgRegisters(const StringPiece& shorty);
- ALWAYS_INLINE uint32_t GetDexMethodIndex() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetDexMethodIndex() REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE uint32_t GetImtIndex() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetImtIndex() REQUIRES_SHARED(Locks::mutator_lock_);
void SetDexMethodIndex(uint32_t new_idx) {
// Not called within a transaction.
@@ -432,45 +431,45 @@
}
ALWAYS_INLINE ArtMethod** GetDexCacheResolvedMethods(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetDexCacheResolvedMethod(uint16_t method_index,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE void SetDexCacheResolvedMethod(uint16_t method_index,
ArtMethod* new_method,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE void SetDexCacheResolvedMethods(ArtMethod** new_dex_cache_methods,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
- bool HasDexCacheResolvedMethods(PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ bool HasDexCacheResolvedMethods(PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_);
bool HasSameDexCacheResolvedMethods(ArtMethod* other, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool HasSameDexCacheResolvedMethods(ArtMethod** other_cache, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <bool kWithCheck = true>
mirror::Class* GetDexCacheResolvedType(uint32_t type_idx, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetDexCacheResolvedTypes(GcRoot<mirror::Class>* new_dex_cache_types,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
- bool HasDexCacheResolvedTypes(PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ bool HasDexCacheResolvedTypes(PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_);
bool HasSameDexCacheResolvedTypes(ArtMethod* other, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool HasSameDexCacheResolvedTypes(GcRoot<mirror::Class>* other_cache, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get the Class* from the type index into this method's dex cache.
mirror::Class* GetClassFromTypeIndex(uint16_t type_idx, bool resolve, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if this method has the same name and signature of the other method.
- bool HasSameNameAndSignature(ArtMethod* other) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool HasSameNameAndSignature(ArtMethod* other) REQUIRES_SHARED(Locks::mutator_lock_);
// Find the method that this method overrides.
ArtMethod* FindOverriddenMethod(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Find the method index for this method within other_dexfile. If this method isn't present then
// return DexFile::kDexNoIndex. The name_and_signature_idx MUST refer to a MethodId with the same
@@ -478,10 +477,10 @@
// in the other_dexfile.
uint32_t FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfile,
uint32_t name_and_signature_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result, const char* shorty)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const void* GetEntryPointFromQuickCompiledCode() {
return GetEntryPointFromQuickCompiledCodePtrSize(kRuntimePointerSize);
@@ -503,9 +502,9 @@
}
void RegisterNative(const void* native_method, bool is_fast)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- void UnregisterNative() SHARED_REQUIRES(Locks::mutator_lock_);
+ void UnregisterNative() REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset DexCacheResolvedMethodsOffset(PointerSize pointer_size) {
return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
@@ -594,13 +593,13 @@
ALWAYS_INLINE bool IsRuntimeMethod();
// Is this a hand crafted method used for something like describing callee saves?
- bool IsCalleeSaveMethod() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsCalleeSaveMethod() REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsResolutionMethod() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsResolutionMethod() REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsImtUnimplementedMethod() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsImtUnimplementedMethod() REQUIRES_SHARED(Locks::mutator_lock_);
- MethodReference ToMethodReference() SHARED_REQUIRES(Locks::mutator_lock_) {
+ MethodReference ToMethodReference() REQUIRES_SHARED(Locks::mutator_lock_) {
return MethodReference(GetDexFile(), GetDexMethodIndex());
}
@@ -609,66 +608,66 @@
// a move-exception instruction is present.
uint32_t FindCatchBlock(Handle<mirror::Class> exception_type, uint32_t dex_pc,
bool* has_no_move_exception)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename RootVisitorType>
void VisitRoots(RootVisitorType& visitor, PointerSize pointer_size) NO_THREAD_SAFETY_ANALYSIS;
- const DexFile* GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_);
+ const DexFile* GetDexFile() REQUIRES_SHARED(Locks::mutator_lock_);
- const char* GetDeclaringClassDescriptor() SHARED_REQUIRES(Locks::mutator_lock_);
+ const char* GetDeclaringClassDescriptor() REQUIRES_SHARED(Locks::mutator_lock_);
- const char* GetShorty() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const char* GetShorty() REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t unused_length;
return GetShorty(&unused_length);
}
- const char* GetShorty(uint32_t* out_length) SHARED_REQUIRES(Locks::mutator_lock_);
+ const char* GetShorty(uint32_t* out_length) REQUIRES_SHARED(Locks::mutator_lock_);
- const Signature GetSignature() SHARED_REQUIRES(Locks::mutator_lock_);
+ const Signature GetSignature() REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE const char* GetName() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE const char* GetName() REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::String* GetNameAsString(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::String* GetNameAsString(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
- const DexFile::CodeItem* GetCodeItem() SHARED_REQUIRES(Locks::mutator_lock_);
+ const DexFile::CodeItem* GetCodeItem() REQUIRES_SHARED(Locks::mutator_lock_);
bool IsResolvedTypeIdx(uint16_t type_idx, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- int32_t GetLineNumFromDexPC(uint32_t dex_pc) SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t GetLineNumFromDexPC(uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_);
- const DexFile::ProtoId& GetPrototype() SHARED_REQUIRES(Locks::mutator_lock_);
+ const DexFile::ProtoId& GetPrototype() REQUIRES_SHARED(Locks::mutator_lock_);
- const DexFile::TypeList* GetParameterTypeList() SHARED_REQUIRES(Locks::mutator_lock_);
+ const DexFile::TypeList* GetParameterTypeList() REQUIRES_SHARED(Locks::mutator_lock_);
- const char* GetDeclaringClassSourceFile() SHARED_REQUIRES(Locks::mutator_lock_);
+ const char* GetDeclaringClassSourceFile() REQUIRES_SHARED(Locks::mutator_lock_);
- uint16_t GetClassDefIndex() SHARED_REQUIRES(Locks::mutator_lock_);
+ uint16_t GetClassDefIndex() REQUIRES_SHARED(Locks::mutator_lock_);
- const DexFile::ClassDef& GetClassDef() SHARED_REQUIRES(Locks::mutator_lock_);
+ const DexFile::ClassDef& GetClassDef() REQUIRES_SHARED(Locks::mutator_lock_);
- const char* GetReturnTypeDescriptor() SHARED_REQUIRES(Locks::mutator_lock_);
+ const char* GetReturnTypeDescriptor() REQUIRES_SHARED(Locks::mutator_lock_);
const char* GetTypeDescriptorFromTypeIdx(uint16_t type_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// May cause thread suspension due to GetClassFromTypeIdx calling ResolveType; this caused a large
// number of bugs at call sites.
mirror::Class* GetReturnType(bool resolve, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::ClassLoader* GetClassLoader() SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::ClassLoader* GetClassLoader() REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::DexCache* GetDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// May cause thread suspension due to class resolution.
bool EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> params)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Size of an instance of this native class.
static size_t Size(PointerSize pointer_size) {
@@ -684,10 +683,10 @@
}
void CopyFrom(ArtMethod* src, PointerSize image_pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE GcRoot<mirror::Class>* GetDexCacheResolvedTypes(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Note, hotness_counter_ updates are non-atomic but it doesn't need to be precise. Also,
// given that the counter is only 16 bits wide we can expect wrap-around in some
@@ -708,15 +707,15 @@
return hotness_count_;
}
- const uint8_t* GetQuickenedInfo() SHARED_REQUIRES(Locks::mutator_lock_);
+ const uint8_t* GetQuickenedInfo() REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the method header for the compiled code containing 'pc'. Note that runtime
// methods will return null for this method, as they are not oat based.
const OatQuickMethodHeader* GetOatQuickMethodHeader(uintptr_t pc)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns whether the method has any compiled code, JIT or AOT.
- bool HasAnyCompiledCode() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool HasAnyCompiledCode() REQUIRES_SHARED(Locks::mutator_lock_);
// Update heap objects and non-entrypoint pointers by the passed in visitor for image relocation.
@@ -724,7 +723,7 @@
template <typename Visitor>
ALWAYS_INLINE void UpdateObjectsForImageRelocation(const Visitor& visitor,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Update entry points by passing them through the visitor.
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 102b993..f4addf7 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -178,10 +178,13 @@
#define MIRROR_CLASS_OBJECT_SIZE_OFFSET (96 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_OBJECT_SIZE_OFFSET,
art::mirror::Class::ObjectSizeOffset().Int32Value())
-#define MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET (100 + MIRROR_OBJECT_HEADER_SIZE)
+#define MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET (100 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET,
+ art::mirror::Class::ObjectSizeAllocFastPathOffset().Int32Value())
+#define MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET (104 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET,
art::mirror::Class::PrimitiveTypeOffset().Int32Value())
-#define MIRROR_CLASS_STATUS_OFFSET (108 + MIRROR_OBJECT_HEADER_SIZE)
+#define MIRROR_CLASS_STATUS_OFFSET (112 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_STATUS_OFFSET,
art::mirror::Class::StatusOffset().Int32Value())
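
The asm_support.h hunk above tracks a new 4-byte mirror::Class field (the allocation fast-path object size) inserted at offset 100, which pushes the primitive-type and status offsets to 104 and 112; the ADD_TEST_EQ lines are what keep the assembler-visible constants honest against the C++ layout. The same technique in a tiny, self-contained form (FakeClass and the macro name are hypothetical, not the real mirror::Class):

#include <cstddef>
#include <cstdint>

// Hypothetical stand-in for mirror::Class; only the layout idea matters.
struct FakeClass {
  uint32_t object_size_;                  // offset 0
  uint32_t object_size_alloc_fast_path_;  // offset 4: the newly inserted field
  uint32_t primitive_type_;               // offset 8: pushed back by the insertion
};

// Assembler-visible constant, cross-checked against the C++ layout at compile time.
#define FAKE_CLASS_PRIMITIVE_TYPE_OFFSET 8
static_assert(FAKE_CLASS_PRIMITIVE_TYPE_OFFSET == offsetof(FakeClass, primitive_type_),
              "asm constant out of sync with C++ layout");
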
diff --git a/runtime/base/bit_utils.h b/runtime/base/bit_utils.h
index f279f45..d2f0fdb 100644
--- a/runtime/base/bit_utils.h
+++ b/runtime/base/bit_utils.h
@@ -21,50 +21,47 @@
#include <limits>
#include <type_traits>
+// This header is used in the disassembler with libbase's logging. Only include ART logging
+// when no other logging macros are available. b/15436106, b/31338270
+#ifndef CHECK
#include "base/logging.h"
+#endif
+
#include "base/iteration_range.h"
#include "base/stl_util.h"
namespace art {
template<typename T>
-static constexpr int CLZ(T x) {
+constexpr int CLZ(T x) {
static_assert(std::is_integral<T>::value, "T must be integral");
static_assert(std::is_unsigned<T>::value, "T must be unsigned");
static_assert(sizeof(T) <= sizeof(long long), // NOLINT [runtime/int] [4]
"T too large, must be smaller than long long");
- return
- DCHECK_CONSTEXPR(x != 0, "x must not be zero", T(0))
- (sizeof(T) == sizeof(uint32_t))
- ? __builtin_clz(x)
- : __builtin_clzll(x);
+ DCHECK_NE(x, 0u);
+ return (sizeof(T) == sizeof(uint32_t)) ? __builtin_clz(x) : __builtin_clzll(x);
}
template<typename T>
-static constexpr int CTZ(T x) {
+constexpr int CTZ(T x) {
static_assert(std::is_integral<T>::value, "T must be integral");
// It is not unreasonable to ask for trailing zeros in a negative number. As such, do not check
// that T is an unsigned type.
static_assert(sizeof(T) <= sizeof(long long), // NOLINT [runtime/int] [4]
"T too large, must be smaller than long long");
- return
- DCHECK_CONSTEXPR(x != 0, "x must not be zero", T(0))
- (sizeof(T) == sizeof(uint32_t))
- ? __builtin_ctz(x)
- : __builtin_ctzll(x);
+ DCHECK_NE(x, static_cast<T>(0));
+ return (sizeof(T) == sizeof(uint32_t)) ? __builtin_ctz(x) : __builtin_ctzll(x);
}
// Return the number of 1-bits in `x`.
template<typename T>
-static constexpr int POPCOUNT(T x) {
- return (sizeof(T) == sizeof(uint32_t))
- ? __builtin_popcount(x)
- : __builtin_popcountll(x);
+constexpr int POPCOUNT(T x) {
+ return (sizeof(T) == sizeof(uint32_t)) ? __builtin_popcount(x) : __builtin_popcountll(x);
}
// Find the bit position of the most significant bit (0-based), or -1 if there were no bits set.
template <typename T>
-static constexpr ssize_t MostSignificantBit(T value) {
+constexpr ssize_t MostSignificantBit(T value) {
static_assert(std::is_integral<T>::value, "T must be integral");
static_assert(std::is_unsigned<T>::value, "T must be unsigned");
static_assert(std::numeric_limits<T>::radix == 2, "Unexpected radix!");
@@ -73,7 +70,7 @@
// Find the bit position of the least significant bit (0-based), or -1 if there were no bits set.
template <typename T>
-static constexpr ssize_t LeastSignificantBit(T value) {
+constexpr ssize_t LeastSignificantBit(T value) {
static_assert(std::is_integral<T>::value, "T must be integral");
static_assert(std::is_unsigned<T>::value, "T must be unsigned");
return (value == 0) ? -1 : CTZ(value);
@@ -81,12 +78,12 @@
// How many bits (minimally) does it take to store the constant 'value'? i.e. 1 for 1, 3 for 5, etc.
template <typename T>
-static constexpr size_t MinimumBitsToStore(T value) {
+constexpr size_t MinimumBitsToStore(T value) {
return static_cast<size_t>(MostSignificantBit(value) + 1);
}
template <typename T>
-static constexpr inline T RoundUpToPowerOfTwo(T x) {
+constexpr T RoundUpToPowerOfTwo(T x) {
static_assert(std::is_integral<T>::value, "T must be integral");
static_assert(std::is_unsigned<T>::value, "T must be unsigned");
// NOTE: Undefined if x > (1 << (std::numeric_limits<T>::digits - 1)).
@@ -94,14 +91,14 @@
}
template<typename T>
-static constexpr bool IsPowerOfTwo(T x) {
+constexpr bool IsPowerOfTwo(T x) {
static_assert(std::is_integral<T>::value, "T must be integral");
// TODO: assert unsigned. There are currently many uses with signed values.
return (x & (x - 1)) == 0;
}
template<typename T>
-static inline int WhichPowerOf2(T x) {
+constexpr int WhichPowerOf2(T x) {
static_assert(std::is_integral<T>::value, "T must be integral");
// TODO: assert unsigned. There are currently many uses with signed values.
DCHECK((x != 0) && IsPowerOfTwo(x));
@@ -111,53 +108,52 @@
// For rounding integers.
// Note: Omit the `n` from T type deduction, deduce only from the `x` argument.
template<typename T>
-static constexpr T RoundDown(T x, typename Identity<T>::type n) WARN_UNUSED;
+constexpr T RoundDown(T x, typename Identity<T>::type n) WARN_UNUSED;
template<typename T>
-static constexpr T RoundDown(T x, typename Identity<T>::type n) {
- return
- DCHECK_CONSTEXPR(IsPowerOfTwo(n), , T(0))
- (x & -n);
+constexpr T RoundDown(T x, typename Identity<T>::type n) {
+ DCHECK(IsPowerOfTwo(n));
+ return (x & -n);
}
template<typename T>
-static constexpr T RoundUp(T x, typename std::remove_reference<T>::type n) WARN_UNUSED;
+constexpr T RoundUp(T x, typename std::remove_reference<T>::type n) WARN_UNUSED;
template<typename T>
-static constexpr T RoundUp(T x, typename std::remove_reference<T>::type n) {
+constexpr T RoundUp(T x, typename std::remove_reference<T>::type n) {
return RoundDown(x + n - 1, n);
}
// For aligning pointers.
template<typename T>
-static inline T* AlignDown(T* x, uintptr_t n) WARN_UNUSED;
+inline T* AlignDown(T* x, uintptr_t n) WARN_UNUSED;
template<typename T>
-static inline T* AlignDown(T* x, uintptr_t n) {
+inline T* AlignDown(T* x, uintptr_t n) {
return reinterpret_cast<T*>(RoundDown(reinterpret_cast<uintptr_t>(x), n));
}
template<typename T>
-static inline T* AlignUp(T* x, uintptr_t n) WARN_UNUSED;
+inline T* AlignUp(T* x, uintptr_t n) WARN_UNUSED;
template<typename T>
-static inline T* AlignUp(T* x, uintptr_t n) {
+inline T* AlignUp(T* x, uintptr_t n) {
return reinterpret_cast<T*>(RoundUp(reinterpret_cast<uintptr_t>(x), n));
}
template<int n, typename T>
-static constexpr bool IsAligned(T x) {
+constexpr bool IsAligned(T x) {
static_assert((n & (n - 1)) == 0, "n is not a power of two");
return (x & (n - 1)) == 0;
}
template<int n, typename T>
-static inline bool IsAligned(T* x) {
+inline bool IsAligned(T* x) {
return IsAligned<n>(reinterpret_cast<const uintptr_t>(x));
}
template<typename T>
-static inline bool IsAlignedParam(T x, int n) {
+inline bool IsAlignedParam(T x, int n) {
return (x & (n - 1)) == 0;
}
@@ -175,9 +171,9 @@
// Like sizeof, but count how many bits a type takes. Pass type explicitly.
template <typename T>
-static constexpr size_t BitSizeOf() {
+constexpr size_t BitSizeOf() {
static_assert(std::is_integral<T>::value, "T must be integral");
- typedef typename std::make_unsigned<T>::type unsigned_type;
+ using unsigned_type = typename std::make_unsigned<T>::type;
static_assert(sizeof(T) == sizeof(unsigned_type), "Unexpected type size mismatch!");
static_assert(std::numeric_limits<unsigned_type>::radix == 2, "Unexpected radix!");
return std::numeric_limits<unsigned_type>::digits;
@@ -185,29 +181,29 @@
// Like sizeof, but count how many bits a type takes. Infers type from parameter.
template <typename T>
-static constexpr size_t BitSizeOf(T /*x*/) {
+constexpr size_t BitSizeOf(T /*x*/) {
return BitSizeOf<T>();
}
-static inline uint16_t Low16Bits(uint32_t value) {
+inline uint16_t Low16Bits(uint32_t value) {
return static_cast<uint16_t>(value);
}
-static inline uint16_t High16Bits(uint32_t value) {
+inline uint16_t High16Bits(uint32_t value) {
return static_cast<uint16_t>(value >> 16);
}
-static inline uint32_t Low32Bits(uint64_t value) {
+inline uint32_t Low32Bits(uint64_t value) {
return static_cast<uint32_t>(value);
}
-static inline uint32_t High32Bits(uint64_t value) {
+inline uint32_t High32Bits(uint64_t value) {
return static_cast<uint32_t>(value >> 32);
}
// Check whether an N-bit two's-complement representation can hold value.
template <typename T>
-static inline bool IsInt(size_t N, T value) {
+inline bool IsInt(size_t N, T value) {
if (N == BitSizeOf<T>()) {
return true;
} else {
@@ -219,15 +215,14 @@
}
template <typename T>
-static constexpr T GetIntLimit(size_t bits) {
- return
- DCHECK_CONSTEXPR(bits > 0, "bits cannot be zero", 0)
- DCHECK_CONSTEXPR(bits < BitSizeOf<T>(), "kBits must be < max.", 0)
- static_cast<T>(1) << (bits - 1);
+constexpr T GetIntLimit(size_t bits) {
+ DCHECK_NE(bits, 0u);
+ DCHECK_LT(bits, BitSizeOf<T>());
+ return static_cast<T>(1) << (bits - 1);
}
template <size_t kBits, typename T>
-static constexpr bool IsInt(T value) {
+constexpr bool IsInt(T value) {
static_assert(kBits > 0, "kBits cannot be zero.");
static_assert(kBits <= BitSizeOf<T>(), "kBits must be <= max.");
static_assert(std::is_signed<T>::value, "Needs a signed type.");
@@ -239,7 +234,7 @@
}
template <size_t kBits, typename T>
-static constexpr bool IsUint(T value) {
+constexpr bool IsUint(T value) {
static_assert(kBits > 0, "kBits cannot be zero.");
static_assert(kBits <= BitSizeOf<T>(), "kBits must be <= max.");
static_assert(std::is_integral<T>::value, "Needs an integral type.");
@@ -247,17 +242,17 @@
// trivially true.
// NOTE: To avoid triggering assertion in GetIntLimit(kBits+1) if kBits+1==BitSizeOf<T>(),
// use GetIntLimit(kBits)*2u. The unsigned arithmetic works well for us if it overflows.
+ using unsigned_type = typename std::make_unsigned<T>::type;
return (0 <= value) &&
(kBits == BitSizeOf<T>() ||
- (static_cast<typename std::make_unsigned<T>::type>(value) <=
- GetIntLimit<typename std::make_unsigned<T>::type>(kBits) * 2u - 1u));
+ (static_cast<unsigned_type>(value) <= GetIntLimit<unsigned_type>(kBits) * 2u - 1u));
}
template <size_t kBits, typename T>
-static constexpr bool IsAbsoluteUint(T value) {
+constexpr bool IsAbsoluteUint(T value) {
static_assert(kBits <= BitSizeOf<T>(), "kBits must be <= max.");
static_assert(std::is_integral<T>::value, "Needs an integral type.");
- typedef typename std::make_unsigned<T>::type unsigned_type;
+ using unsigned_type = typename std::make_unsigned<T>::type;
return (kBits == BitSizeOf<T>())
? true
: IsUint<kBits>(value < 0
@@ -267,29 +262,26 @@
// Generate maximum/minimum values for signed/unsigned n-bit integers
template <typename T>
-static constexpr T MaxInt(size_t bits) {
- return
- DCHECK_CONSTEXPR(std::is_unsigned<T>::value || bits > 0, "bits cannot be zero for signed", 0)
- DCHECK_CONSTEXPR(bits <= BitSizeOf<T>(), "kBits must be < max.", 0)
- bits == BitSizeOf<T>()
- ? std::numeric_limits<T>::max()
- : std::is_signed<T>::value
- ? (bits == 1
- ? 0
- : static_cast<T>(MaxInt<typename std::make_unsigned<T>::type>(bits - 1)))
- : static_cast<T>(UINT64_C(1) << bits) - static_cast<T>(1);
+constexpr T MaxInt(size_t bits) {
+ DCHECK(std::is_unsigned<T>::value || bits > 0u) << "bits cannot be zero for signed.";
+ DCHECK_LE(bits, BitSizeOf<T>());
+ using unsigned_type = typename std::make_unsigned<T>::type;
+ return bits == BitSizeOf<T>()
+ ? std::numeric_limits<T>::max()
+ : std::is_signed<T>::value
+ ? ((bits == 1u) ? 0 : static_cast<T>(MaxInt<unsigned_type>(bits - 1)))
+ : static_cast<T>(UINT64_C(1) << bits) - static_cast<T>(1);
}
template <typename T>
-static constexpr T MinInt(size_t bits) {
- return
- DCHECK_CONSTEXPR(std::is_unsigned<T>::value || bits > 0, "bits cannot be zero for signed", 0)
- DCHECK_CONSTEXPR(bits <= BitSizeOf<T>(), "kBits must be < max.", 0)
- bits == BitSizeOf<T>()
- ? std::numeric_limits<T>::min()
- : std::is_signed<T>::value
- ? (bits == 1 ? -1 : static_cast<T>(-1) - MaxInt<T>(bits))
- : static_cast<T>(0);
+constexpr T MinInt(size_t bits) {
+ DCHECK(std::is_unsigned<T>::value || bits > 0) << "bits cannot be zero for signed.";
+ DCHECK_LE(bits, BitSizeOf<T>());
+ return bits == BitSizeOf<T>()
+ ? std::numeric_limits<T>::min()
+ : std::is_signed<T>::value
+ ? ((bits == 1u) ? -1 : static_cast<T>(-1) - MaxInt<T>(bits))
+ : static_cast<T>(0);
}
// Using the Curiously Recurring Template Pattern to implement everything shared
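
The bit_utils.h rewrite above leans on C++14 relaxed constexpr: the DCHECK_CONSTEXPR expression-threading trick is no longer needed because an ordinary runtime assertion statement may appear inside a constexpr function as long as its failing branch is never reached during constant evaluation. A self-contained sketch of the same idea, with MY_DCHECK and Clz standing in for the ART macros and helpers:

#include <cstdio>
#include <cstdlib>

// Stand-in for a runtime-only assertion macro; its failing branch is never
// evaluated for a passing check, so it is legal in a C++14 constexpr function.
#define MY_DCHECK(cond)                                   \
  do {                                                    \
    if (!(cond)) {                                        \
      std::fprintf(stderr, "Check failed: %s\n", #cond);  \
      std::abort();                                       \
    }                                                     \
  } while (false)

template <typename T>
constexpr int Clz(T x) {
  MY_DCHECK(x != 0);  // statement form, no DCHECK_CONSTEXPR contortions
  return (sizeof(T) == sizeof(unsigned)) ? __builtin_clz(x)
                                         : __builtin_clzll(x);
}

// Still usable in constant expressions (gcc/clang constant-fold the builtins).
static_assert(Clz(1u) == 31, "leading zeros of 1 in a 32-bit word");
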
diff --git a/runtime/base/enums.h b/runtime/base/enums.h
index 51b86ea..52cab2a 100644
--- a/runtime/base/enums.h
+++ b/runtime/base/enums.h
@@ -20,9 +20,6 @@
#include <cstddef>
#include <ostream>
-#include "base/logging.h"
-#include "base/macros.h"
-
namespace art {
enum class PointerSize : size_t {
@@ -35,16 +32,6 @@
? PointerSize::k64
: PointerSize::k32;
-template <typename T>
-static constexpr PointerSize ConvertToPointerSize(T any) {
- if (any == 4 || any == 8) {
- return static_cast<PointerSize>(any);
- } else {
- LOG(FATAL);
- UNREACHABLE();
- }
-}
-
} // namespace art
#endif // ART_RUNTIME_BASE_ENUMS_H_
diff --git a/runtime/base/iteration_range.h b/runtime/base/iteration_range.h
index cf02d32..54ab174 100644
--- a/runtime/base/iteration_range.h
+++ b/runtime/base/iteration_range.h
@@ -45,12 +45,12 @@
};
template <typename Iter>
-static inline IterationRange<Iter> MakeIterationRange(const Iter& begin_it, const Iter& end_it) {
+inline IterationRange<Iter> MakeIterationRange(const Iter& begin_it, const Iter& end_it) {
return IterationRange<Iter>(begin_it, end_it);
}
template <typename Iter>
-static inline IterationRange<Iter> MakeEmptyIterationRange(const Iter& it) {
+inline IterationRange<Iter> MakeEmptyIterationRange(const Iter& it) {
return IterationRange<Iter>(it, it);
}
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index 28352cb..529c391 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -139,13 +139,10 @@
class LogMessageData {
public:
LogMessageData(const char* file, unsigned int line, LogSeverity severity, int error)
- : file_(file),
+ : file_(GetFilenameBase(file)),
line_number_(line),
severity_(severity),
- error_(error) {
- const char* last_slash = strrchr(file, '/');
- file = (last_slash == nullptr) ? file : last_slash + 1;
- }
+ error_(error) {}
const char * GetFile() const {
return file_;
@@ -178,6 +175,11 @@
const LogSeverity severity_;
const int error_;
+ static const char* GetFilenameBase(const char* file) {
+ const char* last_slash = strrchr(file, '/');
+ return (last_slash == nullptr) ? file : last_slash + 1;
+ }
+
DISALLOW_COPY_AND_ASSIGN(LogMessageData);
};
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index ac21a3f..f43cb8b 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -161,16 +161,6 @@
} \
} while (false)
-// CHECK that can be used in a constexpr function. For example,
-// constexpr int half(int n) {
-// return
-// DCHECK_CONSTEXPR(n >= 0, , 0)
-// CHECK_CONSTEXPR((n & 1) == 0), << "Extra debugging output: n = " << n, 0)
-// n / 2;
-// }
-#define CHECK_CONSTEXPR(x, out, dummy) \
- (UNLIKELY(!(x))) ? (LOG(::art::FATAL) << "Check failed: " << #x out, dummy) :
-
// DCHECKs are debug variants of CHECKs only enabled in debug builds. Generally CHECK should be
// used unless profiling identifies a CHECK as being in performance critical code.
@@ -189,11 +179,6 @@
#define DCHECK_GT(x, y) if (::art::kEnableDChecks) CHECK_GT(x, y)
#define DCHECK_STREQ(s1, s2) if (::art::kEnableDChecks) CHECK_STREQ(s1, s2)
#define DCHECK_STRNE(s1, s2) if (::art::kEnableDChecks) CHECK_STRNE(s1, s2)
-#if defined(NDEBUG)
-#define DCHECK_CONSTEXPR(x, out, dummy)
-#else
-#define DCHECK_CONSTEXPR(x, out, dummy) CHECK_CONSTEXPR(x, out, dummy)
-#endif
// Temporary class created to evaluate the LHS and RHS, used with MakeEagerEvaluator to infer the
// types of LHS and RHS.
@@ -206,7 +191,7 @@
// Helper function for CHECK_xx.
template <typename LHS, typename RHS>
-static inline constexpr EagerEvaluator<LHS, RHS> MakeEagerEvaluator(LHS lhs, RHS rhs) {
+constexpr EagerEvaluator<LHS, RHS> MakeEagerEvaluator(LHS lhs, RHS rhs) {
return EagerEvaluator<LHS, RHS>(lhs, rhs);
}
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index 0ec6e6d..6cd7d60 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -20,15 +20,8 @@
#include <stddef.h> // for size_t
#include <unistd.h> // for TEMP_FAILURE_RETRY
-// bionic and glibc both have TEMP_FAILURE_RETRY, but eg Mac OS' libc doesn't.
-#ifndef TEMP_FAILURE_RETRY
-#define TEMP_FAILURE_RETRY(exp) ({ \
- decltype(exp) _rc; \
- do { \
- _rc = (exp); \
- } while (_rc == -1 && errno == EINTR); \
- _rc; })
-#endif
+#include "android-base/macros.h"
+#include "android-base/thread_annotations.h"
#define OVERRIDE override
#define FINAL final
@@ -42,23 +35,6 @@
#define ART_FRIEND_TYPED_TEST(test_set_name, individual_test)\
template<typename T> ART_FRIEND_TEST(test_set_name, individual_test)
-// DISALLOW_COPY_AND_ASSIGN disallows the copy and operator= functions. It goes in the private:
-// declarations in a class.
-#if !defined(DISALLOW_COPY_AND_ASSIGN)
-#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
- TypeName(const TypeName&) = delete; \
- void operator=(const TypeName&) = delete
-#endif
-
-// A macro to disallow all the implicit constructors, namely the default constructor, copy
-// constructor and operator= functions.
-//
-// This should be used in the private: declarations for a class that wants to prevent anyone from
-// instantiating it. This is especially useful for classes containing only static methods.
-#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
- TypeName() = delete; \
- DISALLOW_COPY_AND_ASSIGN(TypeName)
-
// A macro to disallow new and delete operators for a class. It goes in the private: declarations.
// NOTE: Providing placement new (and matching delete) for constructing container elements.
#define DISALLOW_ALLOCATION() \
@@ -69,64 +45,6 @@
private: \
void* operator new(size_t) = delete // NOLINT
-// The arraysize(arr) macro returns the # of elements in an array arr.
-// The expression is a compile-time constant, and therefore can be
-// used in defining new arrays, for example. If you use arraysize on
-// a pointer by mistake, you will get a compile-time error.
-//
-// One caveat is that arraysize() doesn't accept any array of an
-// anonymous type or a type defined inside a function. In these rare
-// cases, you have to use the unsafe ARRAYSIZE_UNSAFE() macro below. This is
-// due to a limitation in C++'s template system. The limitation might
-// eventually be removed, but it hasn't happened yet.
-
-// This template function declaration is used in defining arraysize.
-// Note that the function doesn't need an implementation, as we only
-// use its type.
-template <typename T, size_t N>
-char (&ArraySizeHelper(T (&array)[N]))[N];
-
-#define arraysize(array) (sizeof(ArraySizeHelper(array)))
-
-// ARRAYSIZE_UNSAFE performs essentially the same calculation as arraysize,
-// but can be used on anonymous types or types defined inside
-// functions. It's less safe than arraysize as it accepts some
-// (although not all) pointers. Therefore, you should use arraysize
-// whenever possible.
-//
-// The expression ARRAYSIZE_UNSAFE(a) is a compile-time constant of type
-// size_t.
-//
-// ARRAYSIZE_UNSAFE catches a few type errors. If you see a compiler error
-//
-// "warning: division by zero in ..."
-//
-// when using ARRAYSIZE_UNSAFE, you are (wrongfully) giving it a pointer.
-// You should only use ARRAYSIZE_UNSAFE on statically allocated arrays.
-//
-// The following comments are on the implementation details, and can
-// be ignored by the users.
-//
-// ARRAYSIZE_UNSAFE(arr) works by inspecting sizeof(arr) (the # of bytes in
-// the array) and sizeof(*(arr)) (the # of bytes in one array
-// element). If the former is divisible by the latter, perhaps arr is
-// indeed an array, in which case the division result is the # of
-// elements in the array. Otherwise, arr cannot possibly be an array,
-// and we generate a compiler error to prevent the code from
-// compiling.
-//
-// Since the size of bool is implementation-defined, we need to cast
-// !(sizeof(a) & sizeof(*(a))) to size_t in order to ensure the final
-// result has type size_t.
-//
-// This macro is not perfect as it wrongfully accepts certain
-// pointers, namely where the pointer size is divisible by the pointee
-// size. Since all our code has to go through a 32-bit compiler,
-// where a pointer is 4 bytes, this means all pointers to a type whose
-// size is 3 or greater than 4 will be (righteously) rejected.
-#define ARRAYSIZE_UNSAFE(a) \
- ((sizeof(a) / sizeof(*(a))) / static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
-
#define SIZEOF_MEMBER(t, f) sizeof((reinterpret_cast<t*>(4096))->f) // NOLINT
#define OFFSETOF_MEMBER(t, f) \
@@ -137,9 +55,6 @@
#define PACKED(x) __attribute__ ((__aligned__(x), __packed__))
-#define LIKELY(x) __builtin_expect((x), true)
-#define UNLIKELY(x) __builtin_expect((x), false)
-
// Stringify the argument.
#define QUOTE(x) #x
#define STRINGIFY(x) QUOTE(x)
@@ -165,17 +80,6 @@
#endif
#define PURE __attribute__ ((__pure__))
-#define WARN_UNUSED __attribute__((warn_unused_result))
-
-// A deprecated function to call to create a false use of the parameter, for example:
-// int foo(int x) { UNUSED(x); return 10; }
-// to avoid compiler warnings. Going forward we prefer ATTRIBUTE_UNUSED.
-template<typename... T> void UNUSED(const T&...) {}
-
-// An attribute to place on a parameter to a function, for example:
-// int foo(int x ATTRIBUTE_UNUSED) { return 10; }
-// to avoid compiler warnings.
-#define ATTRIBUTE_UNUSED __attribute__((__unused__))
// Define that a position within code is unreachable, for example:
// int foo () { LOG(FATAL) << "Don't call me"; UNREACHABLE(); }
@@ -185,78 +89,7 @@
// Add the C++11 noreturn attribute.
#define NO_RETURN [[ noreturn ]] // NOLINT[whitespace/braces] [5]
-// The FALLTHROUGH_INTENDED macro can be used to annotate implicit fall-through
-// between switch labels:
-// switch (x) {
-// case 40:
-// case 41:
-// if (truth_is_out_there) {
-// ++x;
-// FALLTHROUGH_INTENDED; // Use instead of/along with annotations in
-// // comments.
-// } else {
-// return x;
-// }
-// case 42:
-// ...
-//
-// As shown in the example above, the FALLTHROUGH_INTENDED macro should be
-// followed by a semicolon. It is designed to mimic control-flow statements
-// like 'break;', so it can be placed in most places where 'break;' can, but
-// only if there are no statements on the execution path between it and the
-// next switch label.
-//
-// When compiled with clang in C++11 mode, the FALLTHROUGH_INTENDED macro is
-// expanded to [[clang::fallthrough]] attribute, which is analysed when
-// performing switch labels fall-through diagnostic ('-Wimplicit-fallthrough').
-// See clang documentation on language extensions for details:
-// http://clang.llvm.org/docs/LanguageExtensions.html#clang__fallthrough
-//
-// When used with unsupported compilers, the FALLTHROUGH_INTENDED macro has no
-// effect on diagnostics.
-//
-// In either case this macro has no effect on runtime behavior and performance
-// of code.
-#if __has_feature(cxx_attributes) && __has_warning("-Wimplicit-fallthrough")
-#define FALLTHROUGH_INTENDED [[clang::fallthrough]] // NOLINT
-#endif
-
-#ifndef FALLTHROUGH_INTENDED
-#define FALLTHROUGH_INTENDED do { } while (0)
-#endif
-
-// Annotalysis thread-safety analysis support.
-
-#define ACQUIRED_AFTER(...) __attribute__((acquired_after(__VA_ARGS__)))
-#define ACQUIRED_BEFORE(...) __attribute__((acquired_before(__VA_ARGS__)))
-#define GUARDED_BY(x) __attribute__((guarded_by(x)))
-#define GUARDED_VAR __attribute__((guarded))
-#define LOCK_RETURNED(x) __attribute__((lock_returned(x)))
-#define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))
-#define PT_GUARDED_BY(x)
-// THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded_by(x))
-#define PT_GUARDED_VAR __attribute__((point_to_guarded))
-#define SCOPED_LOCKABLE __attribute__((scoped_lockable))
-
-#define EXCLUSIVE_LOCK_FUNCTION(...) __attribute__((exclusive_lock_function(__VA_ARGS__)))
-#define EXCLUSIVE_TRYLOCK_FUNCTION(...) __attribute__((exclusive_trylock_function(__VA_ARGS__)))
-#define SHARED_LOCK_FUNCTION(...) __attribute__((shared_lock_function(__VA_ARGS__)))
-#define SHARED_TRYLOCK_FUNCTION(...) __attribute__((shared_trylock_function(__VA_ARGS__)))
-#define UNLOCK_FUNCTION(...) __attribute__((unlock_function(__VA_ARGS__)))
-#define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
-#define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
-#define CAPABILITY(...) __attribute__((capability(__VA_ARGS__)))
-#define SHARED_CAPABILITY(...) __attribute__((shared_capability(__VA_ARGS__)))
-#define ASSERT_CAPABILITY(...) __attribute__((assert_capability(__VA_ARGS__)))
-#define ASSERT_SHARED_CAPABILITY(...) __attribute__((assert_shared_capability(__VA_ARGS__)))
-#define RETURN_CAPABILITY(...) __attribute__((lock_returned(__VA_ARGS__)))
-#define TRY_ACQUIRE(...) __attribute__((try_acquire_capability(__VA_ARGS__)))
-#define TRY_ACQUIRE_SHARED(...) __attribute__((try_acquire_shared_capability(__VA_ARGS__)))
-#define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
-#define ACQUIRE_SHARED(...) __attribute__((acquire_shared_capability(__VA_ARGS__)))
-#define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))
-#define RELEASE_SHARED(...) __attribute__((release_shared_capability(__VA_ARGS__)))
-#define SCOPED_CAPABILITY __attribute__((scoped_lockable))
+// Annotalysis thread-safety analysis support. Things that are not in base.
#define LOCKABLE CAPABILITY("mutex")
#define SHARED_LOCKABLE SHARED_CAPABILITY("mutex")
diff --git a/runtime/base/stl_util.h b/runtime/base/stl_util.h
index a53dcea..a4cf249 100644
--- a/runtime/base/stl_util.h
+++ b/runtime/base/stl_util.h
@@ -20,7 +20,11 @@
#include <algorithm>
#include <sstream>
+// This header is used in the disassembler with libbase's logging. Only include ART logging
+// when no other logging macros are available. b/15436106, b/31338270
+#ifndef CHECK
#include "base/logging.h"
+#endif
namespace art {
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 96fa53c..6683f13 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -268,7 +268,7 @@
* Assumes "jobj" has already been validated.
*/
bool CheckInstanceFieldID(ScopedObjectAccess& soa, jobject java_object, jfieldID fid)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* o = soa.Decode<mirror::Object*>(java_object);
if (o == nullptr) {
AbortF("field operation on NULL object: %p", java_object);
@@ -312,7 +312,7 @@
*/
bool CheckMethodAndSig(ScopedObjectAccess& soa, jobject jobj, jclass jc,
jmethodID mid, Primitive::Type type, InvokeType invoke)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = CheckMethodID(soa, mid);
if (m == nullptr) {
return false;
@@ -359,7 +359,7 @@
* Assumes "java_class" has already been validated.
*/
bool CheckStaticFieldID(ScopedObjectAccess& soa, jclass java_class, jfieldID fid)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
ArtField* f = CheckFieldID(soa, fid);
if (f == nullptr) {
@@ -382,7 +382,7 @@
* Instances of "java_class" must be instances of the method's declaring class.
*/
bool CheckStaticMethod(ScopedObjectAccess& soa, jclass java_class, jmethodID mid)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = CheckMethodID(soa, mid);
if (m == nullptr) {
return false;
@@ -403,7 +403,7 @@
* will be handled automatically by the instanceof check.)
*/
bool CheckVirtualMethod(ScopedObjectAccess& soa, jobject java_object, jmethodID mid)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = CheckMethodID(soa, mid);
if (m == nullptr) {
return false;
@@ -456,7 +456,7 @@
* Use the kFlag_NullableUtf flag where 'u' field(s) are nullable.
*/
bool Check(ScopedObjectAccess& soa, bool entry, const char* fmt, JniValueType* args)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* traceMethod = nullptr;
if (has_method_ && soa.Vm()->IsTracingEnabled()) {
// We need to guard some of the invocation interface's calls: a bad caller might
@@ -556,7 +556,7 @@
}
bool CheckReflectedMethod(ScopedObjectAccess& soa, jobject jmethod)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* method = soa.Decode<mirror::Object*>(jmethod);
if (method == nullptr) {
AbortF("expected non-null method");
@@ -574,7 +574,7 @@
}
bool CheckConstructor(ScopedObjectAccess& soa, jmethodID mid)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = soa.DecodeMethod(mid);
if (method == nullptr) {
AbortF("expected non-null constructor");
@@ -588,7 +588,7 @@
}
bool CheckReflectedField(ScopedObjectAccess& soa, jobject jfield)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* field = soa.Decode<mirror::Object*>(jfield);
if (field == nullptr) {
AbortF("expected non-null java.lang.reflect.Field");
@@ -604,7 +604,7 @@
}
bool CheckThrowable(ScopedObjectAccess& soa, jthrowable jobj)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* obj = soa.Decode<mirror::Object*>(jobj);
if (!obj->GetClass()->IsThrowableClass()) {
AbortF("expected java.lang.Throwable but got object of type "
@@ -615,7 +615,7 @@
}
bool CheckThrowableClass(ScopedObjectAccess& soa, jclass jc)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Class* c = soa.Decode<mirror::Class*>(jc);
if (!c->IsThrowableClass()) {
AbortF("expected java.lang.Throwable class but got object of "
@@ -646,7 +646,7 @@
}
bool CheckInstantiableNonArray(ScopedObjectAccess& soa, jclass jc)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Class* c = soa.Decode<mirror::Class*>(jc);
if (!c->IsInstantiableNonArray()) {
AbortF("can't make objects of type %s: %p", PrettyDescriptor(c).c_str(), c);
@@ -656,7 +656,7 @@
}
bool CheckPrimitiveArrayType(ScopedObjectAccess& soa, jarray array, Primitive::Type type)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!CheckArray(soa, array)) {
return false;
}
@@ -671,7 +671,7 @@
bool CheckFieldAccess(ScopedObjectAccess& soa, jobject obj, jfieldID fid, bool is_static,
Primitive::Type type)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (is_static && !CheckStaticFieldID(soa, down_cast<jclass>(obj), fid)) {
return false;
}
@@ -732,7 +732,7 @@
* to "running" mode before doing the checks.
*/
bool CheckInstance(ScopedObjectAccess& soa, InstanceKind kind, jobject java_object, bool null_ok)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const char* what = nullptr;
switch (kind) {
case kClass:
@@ -828,7 +828,7 @@
}
bool CheckPossibleHeapValue(ScopedObjectAccess& soa, char fmt, JniValueType arg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
switch (fmt) {
case 'a': // jarray
return CheckArray(soa, arg.a);
@@ -856,7 +856,7 @@
}
bool CheckVarArgs(ScopedObjectAccess& soa, const VarArgs* args_p)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(args_p != nullptr);
VarArgs args(args_p->Clone());
ArtMethod* m = CheckMethodID(soa, args.GetMethodID());
@@ -922,7 +922,7 @@
void TracePossibleHeapValue(ScopedObjectAccess& soa, bool entry, char fmt, JniValueType arg,
std::string* msg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
switch (fmt) {
case 'L': // jobject fall-through.
case 'a': // jarray fall-through.
@@ -1101,7 +1101,7 @@
* Since we're dealing with objects, switch to "running" mode.
*/
bool CheckArray(ScopedObjectAccess& soa, jarray java_array)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(java_array == nullptr)) {
AbortF("jarray was NULL");
return false;
@@ -1138,7 +1138,7 @@
}
ArtField* CheckFieldID(ScopedObjectAccess& soa, jfieldID fid)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (fid == nullptr) {
AbortF("jfieldID was NULL");
return nullptr;
@@ -1154,7 +1154,7 @@
}
ArtMethod* CheckMethodID(ScopedObjectAccess& soa, jmethodID mid)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (mid == nullptr) {
AbortF("jmethodID was NULL");
return nullptr;
@@ -1169,7 +1169,7 @@
return m;
}
- bool CheckThread(JNIEnv* env) SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool CheckThread(JNIEnv* env) REQUIRES_SHARED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
if (self == nullptr) {
AbortF("a thread (tid %d) is making JNI calls without being attached", GetTid());
@@ -2855,7 +2855,7 @@
static bool CheckCallArgs(ScopedObjectAccess& soa, ScopedCheck& sc, JNIEnv* env, jobject obj,
jclass c, jmethodID mid, InvokeType invoke, const VarArgs* vargs)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
bool checked;
switch (invoke) {
case kVirtual: {
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index 0e2f9f2..843d4c1 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -28,10 +28,10 @@
// holding references.
class CheckReferenceMapVisitor : public StackVisitor {
public:
- explicit CheckReferenceMapVisitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
+ explicit CheckReferenceMapVisitor(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m->IsCalleeSaveMethod() || m->IsNative()) {
CHECK_EQ(GetDexPc(), DexFile::kDexNoIndex);
@@ -52,14 +52,14 @@
}
void CheckReferences(int* registers, int number_of_references, uint32_t native_pc_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(GetCurrentOatQuickMethodHeader()->IsOptimized());
CheckOptimizedMethod(registers, number_of_references, native_pc_offset);
}
private:
void CheckOptimizedMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
CodeInfo code_info = GetCurrentOatQuickMethodHeader()->GetOptimizedCodeInfo();
CodeInfoEncoding encoding = code_info.ExtractEncoding();
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 97aa499..caabcde 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -222,7 +222,7 @@
}
inline mirror::Class* ClassLinker::GetClassRoot(ClassRoot class_root)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!class_roots_.IsNull());
mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
mirror::Class* klass = class_roots->Get(class_root);
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index f4400c3..726e897 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -102,7 +102,7 @@
static void ThrowNoClassDefFoundError(const char* fmt, ...)
__attribute__((__format__(__printf__, 1, 2)))
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void ThrowNoClassDefFoundError(const char* fmt, ...) {
va_list args;
va_start(args, fmt);
@@ -112,7 +112,7 @@
}
static bool HasInitWithString(Thread* self, ClassLinker* class_linker, const char* descriptor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = self->GetCurrentMethod(nullptr);
StackHandleScope<1> hs(self);
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(method != nullptr ?
@@ -133,7 +133,7 @@
// Helper for ThrowEarlierClassFailure. Throws the stored error.
static void HandleEarlierVerifyError(Thread* self, ClassLinker* class_linker, mirror::Class* c)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* obj = c->GetVerifyError();
DCHECK(obj != nullptr);
self->AssertNoPendingException();
@@ -201,7 +201,7 @@
}
static void VlogClassInitializationFailure(Handle<mirror::Class> klass)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (VLOG_IS_ON(class_linker)) {
std::string temp;
LOG(INFO) << "Failed to initialize class " << klass->GetDescriptor(&temp) << " from "
@@ -210,7 +210,7 @@
}
static void WrapExceptionInInitializer(Handle<mirror::Class> klass)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
JNIEnv* env = self->GetJniEnv();
@@ -272,7 +272,7 @@
MemberOffset* field_offset,
std::deque<ArtField*>* grouped_and_sorted_fields,
FieldGaps* gaps)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(current_field_idx != nullptr);
DCHECK(grouped_and_sorted_fields != nullptr);
DCHECK(gaps != nullptr);
@@ -761,7 +761,7 @@
static void SanityCheckArtMethod(ArtMethod* m,
mirror::Class* expected_class,
const std::vector<gc::space::ImageSpace*>& spaces)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (m->IsRuntimeMethod()) {
mirror::Class* declaring_class = m->GetDeclaringClassUnchecked();
CHECK(declaring_class == nullptr) << declaring_class << " " << PrettyMethod(m);
@@ -790,7 +790,7 @@
mirror::Class* expected_class,
PointerSize pointer_size,
const std::vector<gc::space::ImageSpace*>& spaces)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(arr != nullptr);
for (int32_t j = 0; j < arr->GetLength(); ++j) {
auto* method = arr->GetElementPtrSize<ArtMethod*>(j, pointer_size);
@@ -808,7 +808,7 @@
size_t size,
PointerSize pointer_size,
const std::vector<gc::space::ImageSpace*>& spaces)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK_EQ(arr != nullptr, size != 0u);
if (arr != nullptr) {
bool contains = false;
@@ -832,7 +832,7 @@
}
static void SanityCheckObjectsCallback(mirror::Object* obj, void* arg ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
CHECK(obj->GetClass() != nullptr) << "Null class in object " << obj;
CHECK(obj->GetClass()->GetClass() != nullptr) << "Null class class " << obj;
@@ -883,7 +883,7 @@
explicit SetInterpreterEntrypointArtMethodVisitor(PointerSize image_pointer_size)
: image_pointer_size_(image_pointer_size) {}
- void Visit(ArtMethod* method) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Visit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (kIsDebugBuild && !method->IsRuntimeMethod()) {
CHECK(method->GetDeclaringClass() != nullptr);
}
@@ -1073,7 +1073,7 @@
static mirror::String* GetDexPathListElementName(ScopedObjectAccessUnchecked& soa,
mirror::Object* element)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* const dex_file_field =
soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
ArtField* const dex_file_name_field =
@@ -1096,7 +1096,7 @@
static bool FlattenPathClassLoader(mirror::ClassLoader* class_loader,
std::list<mirror::String*>* out_dex_file_names,
std::string* error_msg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(out_dex_file_names != nullptr);
DCHECK(error_msg != nullptr);
ScopedObjectAccessUnchecked soa(Thread::Current());
@@ -1147,7 +1147,7 @@
public:
explicit FixupArtMethodArrayVisitor(const ImageHeader& header) : header_(header) {}
- virtual void Visit(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) {
+ virtual void Visit(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
GcRoot<mirror::Class>* resolved_types = method->GetDexCacheResolvedTypes(kRuntimePointerSize);
const bool is_copied = method->IsCopied();
if (resolved_types != nullptr) {
@@ -1194,7 +1194,7 @@
explicit VerifyClassInTableArtMethodVisitor(ClassTable* table) : table_(table) {}
virtual void Visit(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::classlinker_classes_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::classlinker_classes_lock_) {
mirror::Class* klass = method->GetDeclaringClass();
if (klass != nullptr && !Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
CHECK_EQ(table_->LookupByDescriptor(klass), klass) << PrettyClass(klass);
@@ -1207,11 +1207,11 @@
class VerifyDeclaringClassVisitor : public ArtMethodVisitor {
public:
- VerifyDeclaringClassVisitor() SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
+ VerifyDeclaringClassVisitor() REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
: live_bitmap_(Runtime::Current()->GetHeap()->GetLiveBitmap()) {}
virtual void Visit(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
mirror::Class* klass = method->GetDeclaringClassUnchecked();
if (klass != nullptr) {
CHECK(live_bitmap_->Test(klass)) << "Image method has unmarked declaring class";
@@ -1476,7 +1476,7 @@
class_loader_(class_loader),
forward_strings_(forward_strings) {}
- bool operator()(mirror::Class* klass) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool operator()(mirror::Class* klass) const REQUIRES_SHARED(Locks::mutator_lock_) {
if (forward_strings_) {
mirror::StringDexCacheType* strings = klass->GetDexCacheStrings();
if (strings != nullptr) {
@@ -1503,7 +1503,7 @@
static std::unique_ptr<const DexFile> OpenOatDexFile(const OatFile* oat_file,
const char* location,
std::string* error_msg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(error_msg != nullptr);
std::unique_ptr<const DexFile> dex_file;
const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(location, nullptr, error_msg);
@@ -1894,7 +1894,7 @@
done_(false) {}
void Visit(mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
ClassTable* const class_table = class_loader->GetClassTable();
if (!done_ && class_table != nullptr && !class_table->Visit(*visitor_)) {
// If the visitor ClassTable returns false it means that we don't need to continue.
@@ -1944,7 +1944,7 @@
explicit GetClassInToObjectArray(mirror::ObjectArray<mirror::Class>* arr)
: arr_(arr), index_(0) {}
- bool operator()(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool operator()(mirror::Class* klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
++index_;
if (index_ <= arr_->GetLength()) {
arr_->Set(index_ - 1, klass);
@@ -1953,7 +1953,7 @@
return false;
}
- bool Succeeded() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool Succeeded() const REQUIRES_SHARED(Locks::mutator_lock_) {
return index_ <= arr_->GetLength();
}
@@ -3669,7 +3669,7 @@
}
static std::vector<mirror::ObjectArray<mirror::DexCache>*> GetImageDexCaches(
- std::vector<gc::space::ImageSpace*> image_spaces) SHARED_REQUIRES(Locks::mutator_lock_) {
+ std::vector<gc::space::ImageSpace*> image_spaces) REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(!image_spaces.empty());
std::vector<mirror::ObjectArray<mirror::DexCache>*> dex_caches_vector;
for (gc::space::ImageSpace* image_space : image_spaces) {
@@ -3731,7 +3731,7 @@
void Visit(mirror::ClassLoader* class_loader)
REQUIRES(Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
ClassTable* const class_table = class_loader->GetClassTable();
if (class_table != nullptr) {
class_table->FreezeSnapshot();
@@ -3777,7 +3777,7 @@
result_(result) {}
void Visit(mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
ClassTable* const class_table = class_loader->GetClassTable();
mirror::Class* klass = class_table->Lookup(descriptor_, hash_);
if (klass != nullptr) {
@@ -4758,7 +4758,7 @@
bool ClassLinker::WaitForInitializeClass(Handle<mirror::Class> klass,
Thread* self,
ObjectLock<mirror::Class>& lock)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
while (true) {
self->AssertNoPendingException();
CHECK(!klass->IsInitialized());
@@ -4802,7 +4802,7 @@
Handle<mirror::Class> super_klass,
ArtMethod* method,
ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(Thread::Current()->IsExceptionPending());
DCHECK(!m->IsProxyMethod());
const DexFile* dex_file = m->GetDexFile();
@@ -4827,7 +4827,7 @@
ArtMethod* m,
uint32_t index,
uint32_t arg_type_idx)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(Thread::Current()->IsExceptionPending());
DCHECK(!m->IsProxyMethod());
const DexFile* dex_file = m->GetDexFile();
@@ -4847,7 +4847,7 @@
Handle<mirror::Class> super_klass,
ArtMethod* method,
const std::string& error_msg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ThrowLinkageError(klass.Get(),
"Class %s method %s resolves differently in %s %s: %s",
PrettyDescriptor(klass.Get()).c_str(),
@@ -4863,7 +4863,7 @@
Handle<mirror::Class> super_klass,
ArtMethod* method1,
ArtMethod* method2)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
{
StackHandleScope<1> hs(self);
Handle<mirror::Class> return_type(hs.NewHandle(method1->GetReturnType(true /* resolve */,
@@ -5075,7 +5075,7 @@
}
static ImTable* FindSuperImt(mirror::Class* klass, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
while (klass->HasSuperClass()) {
klass = klass->GetSuperClass();
if (klass->ShouldHaveImt()) {
@@ -5353,7 +5353,7 @@
const DexFile& dex_file,
const DexFile::ClassDef& class_def,
mirror::Class* super_class)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Check for unexpected changes in the superclass.
// Quick check 1) is the super_class class-loader the boot class loader? This always has
// precedence.
@@ -5565,7 +5565,7 @@
class MethodNameAndSignatureComparator FINAL : public ValueObject {
public:
explicit MethodNameAndSignatureComparator(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) :
+ REQUIRES_SHARED(Locks::mutator_lock_) :
dex_file_(method->GetDexFile()), mid_(&dex_file_->GetMethodId(method->GetDexMethodIndex())),
name_(nullptr), name_len_(0) {
DCHECK(!method->IsProxyMethod()) << PrettyMethod(method);
@@ -5579,7 +5579,7 @@
}
bool HasSameNameAndSignature(ArtMethod* other)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!other->IsProxyMethod()) << PrettyMethod(other);
const DexFile* other_dex_file = other->GetDexFile();
const DexFile::MethodId& other_mid = other_dex_file->GetMethodId(other->GetDexMethodIndex());
@@ -5620,7 +5620,7 @@
std::fill(hash_table_, hash_table_ + hash_size_, invalid_index_);
}
- void Add(uint32_t virtual_method_index) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Add(uint32_t virtual_method_index) REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* local_method = klass_->GetVirtualMethodDuringLinking(
virtual_method_index, image_pointer_size_);
const char* name = local_method->GetInterfaceMethodIfProxy(image_pointer_size_)->GetName();
@@ -5636,7 +5636,7 @@
}
uint32_t FindAndRemove(MethodNameAndSignatureComparator* comparator)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const char* name = comparator->GetName();
uint32_t hash = ComputeModifiedUtf8Hash(name);
size_t index = hash % hash_size_;
@@ -5936,7 +5936,7 @@
size_t ifstart,
Handle<mirror::Class> iface,
PointerSize image_pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(self != nullptr);
DCHECK(iface.Get() != nullptr);
DCHECK(iftable.Get() != nullptr);
@@ -6336,7 +6336,7 @@
static bool NotSubinterfaceOfAny(const std::unordered_set<mirror::Class*>& classes,
mirror::Class* val)
REQUIRES(Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(val != nullptr);
for (auto c : classes) {
if (val->IsAssignableFrom(&*c)) {
@@ -6369,7 +6369,7 @@
size_t super_ifcount,
std::vector<mirror::Class*> to_process)
REQUIRES(Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// This is the set of all classes already in the iftable. Used to make checking if a class has
// already been added quicker.
std::unordered_set<mirror::Class*> classes_in_iftable;
@@ -6537,7 +6537,7 @@
// methods must be unique.
static ArtMethod* FindSameNameAndSignature(MethodNameAndSignatureComparator& cmp,
const ScopedArenaVector<ArtMethod*>& list)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
for (ArtMethod* method : list) {
if (cmp.HasSameNameAndSignature(method)) {
return method;
@@ -6547,7 +6547,7 @@
}
static void SanityCheckVTable(Handle<mirror::Class> klass, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::PointerArray* check_vtable = klass->GetVTableDuringLinking();
mirror::Class* superclass = (klass->HasSuperClass()) ? klass->GetSuperClass() : nullptr;
int32_t super_vtable_length = (superclass != nullptr) ? superclass->GetVTableLength() : 0;
@@ -7165,7 +7165,7 @@
}
struct LinkFieldsComparator {
- explicit LinkFieldsComparator() SHARED_REQUIRES(Locks::mutator_lock_) {
+ explicit LinkFieldsComparator() REQUIRES_SHARED(Locks::mutator_lock_) {
}
// No thread safety analysis as will be called from STL. Checked lock held in constructor.
bool operator()(ArtField* field1, ArtField* field2)
@@ -7814,7 +7814,7 @@
public:
explicit DumpClassVisitor(int flags) : flags_(flags) {}
- bool operator()(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool operator()(mirror::Class* klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
klass->DumpClass(LOG(ERROR), flags_);
return true;
}
@@ -7886,7 +7886,7 @@
CountClassesVisitor() : num_zygote_classes(0), num_non_zygote_classes(0) {}
void Visit(mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
ClassTable* const class_table = class_loader->GetClassTable();
if (class_table != nullptr) {
num_zygote_classes += class_table->NumZygoteClasses();
@@ -8285,7 +8285,7 @@
: method_(method),
pointer_size_(pointer_size) {}
- bool operator()(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE {
+ bool operator()(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
if (klass->GetVirtualMethodsSliceUnchecked(pointer_size_).Contains(method_)) {
holder_ = klass;
}
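The hunks above, and most of what follows, mechanically rename ART's thread-safety annotation SHARED_REQUIRES(...) to REQUIRES_SHARED(...), matching the attribute naming used by Clang's -Wthread-safety analysis. A minimal, self-contained sketch of how such an annotation is typically defined and checked is below; the macro definitions, the lock object, and the functions are illustrative assumptions, not ART's actual declarations.

// Illustrative sketch only -- not ART's real macros or locks.
// Shows how a REQUIRES_SHARED-style annotation feeds Clang's -Wthread-safety
// analysis: callers must hold the capability (at least in shared/reader mode)
// before calling the annotated function.
#if defined(__clang__)
#define CAPABILITY(x)        __attribute__((capability(x)))
#define ACQUIRE_SHARED(...)  __attribute__((acquire_shared_capability(__VA_ARGS__)))
#define RELEASE_SHARED(...)  __attribute__((release_shared_capability(__VA_ARGS__)))
#define REQUIRES_SHARED(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
#else
#define CAPABILITY(x)
#define ACQUIRE_SHARED(...)
#define RELEASE_SHARED(...)
#define REQUIRES_SHARED(...)
#endif

class CAPABILITY("mutex") ReaderWriterMutex {
 public:
  void SharedLock() ACQUIRE_SHARED() {}
  void SharedUnlock() RELEASE_SHARED() {}
};

ReaderWriterMutex mutator_lock;  // stand-in for Locks::mutator_lock_

// The analysis only allows calls to this while mutator_lock is held shared.
int ReadField(const int* field) REQUIRES_SHARED(mutator_lock) {
  return *field;
}

void Caller(const int* field) {
  mutator_lock.SharedLock();
  int value = ReadField(field);  // OK: shared capability is held here.
  mutator_lock.SharedUnlock();
  static_cast<void>(value);
  // Calling ReadField(field) after the unlock would produce a
  // -Wthread-safety warning when the analysis is enabled.
}

Built with clang -Wthread-safety, moving the ReadField call outside the locked region becomes a compile-time diagnostic rather than a latent data race, which is the point of annotating every mutator-lock reader in these headers.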
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index c3ab8c5..4bd1bd2 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -76,7 +76,7 @@
public:
virtual ~ClassLoaderVisitor() {}
virtual void Visit(mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) = 0;
};
class ClassLinker {
@@ -129,12 +129,12 @@
// Initialize class linker by bootstraping from dex files.
bool InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> boot_class_path,
std::string* error_msg)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
// Initialize class linker from one or more boot images.
bool InitFromBootImage(std::string* error_msg)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
// Add an image space to the class linker, may fix up classloader fields and dex cache fields.
@@ -149,20 +149,20 @@
std::vector<std::unique_ptr<const DexFile>>* out_dex_files,
std::string* error_msg)
REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool OpenImageDexFiles(gc::space::ImageSpace* space,
std::vector<std::unique_ptr<const DexFile>>* out_dex_files,
std::string* error_msg)
REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Finds a class by its descriptor, loading it if necessary.
// If class_loader is null, searches boot_class_path_.
mirror::Class* FindClass(Thread* self,
const char* descriptor,
Handle<mirror::ClassLoader> class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
// Finds a class in the path class loader, loading it if necessary without using JNI. Hash
@@ -176,18 +176,18 @@
size_t hash,
Handle<mirror::ClassLoader> class_loader,
mirror::Class** result)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
// Finds a class by its descriptor using the "system" class loader, i.e. by searching the
// boot_class_path_.
mirror::Class* FindSystemClass(Thread* self, const char* descriptor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
// Finds the array class given for the element class.
mirror::Class* FindArrayClass(Thread* self, mirror::Class** element_class)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
// Returns true if the class linker is initialized.
@@ -202,7 +202,7 @@
Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
const DexFile::ClassDef& dex_class_def)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
// Finds a class by its descriptor, returning null if it wasn't loaded
@@ -212,65 +212,65 @@
size_t hash,
mirror::ClassLoader* class_loader)
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Finds all the classes with the given descriptor, regardless of ClassLoader.
void LookupClasses(const char* descriptor, std::vector<mirror::Class*>& classes)
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::Class* FindPrimitiveClass(char type) SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Class* FindPrimitiveClass(char type) REQUIRES_SHARED(Locks::mutator_lock_);
// General class unloading is not supported, this is used to prune
// unwanted classes during image writing.
bool RemoveClass(const char* descriptor, mirror::ClassLoader* class_loader)
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void DumpAllClasses(int flags)
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void DumpForSigQuit(std::ostream& os) REQUIRES(!Locks::classlinker_classes_lock_);
size_t NumLoadedClasses()
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Resolve a String with the given index from the DexFile, storing the
// result in the DexCache. The referrer is used to identify the
// target DexCache and ClassLoader to use for resolution.
mirror::String* ResolveString(uint32_t string_idx, ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Resolve a String with the given index from the DexFile, storing the
// result in the DexCache.
mirror::String* ResolveString(const DexFile& dex_file, uint32_t string_idx,
Handle<mirror::DexCache> dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Find a String with the given index from the DexFile, storing the
// result in the DexCache if found. Return null if not found.
mirror::String* LookupString(const DexFile& dex_file, uint32_t string_idx,
Handle<mirror::DexCache> dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Resolve a Type with the given index from the DexFile, storing the
// result in the DexCache. The referrer is used to identify the
// target DexCache and ClassLoader to use for resolution.
mirror::Class* ResolveType(const DexFile& dex_file, uint16_t type_idx, mirror::Class* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
// Resolve a Type with the given index from the DexFile, storing the
// result in the DexCache. The referrer is used to identify the
// target DexCache and ClassLoader to use for resolution.
mirror::Class* ResolveType(uint16_t type_idx, ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
mirror::Class* ResolveType(uint16_t type_idx, ArtField* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
// Resolve a type with the given ID from the DexFile, storing the
@@ -281,7 +281,7 @@
uint16_t type_idx,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
// Determine whether a dex cache result should be trusted, or an IncompatibleClassChangeError
@@ -303,11 +303,11 @@
Handle<mirror::ClassLoader> class_loader,
ArtMethod* referrer,
InvokeType type)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
ArtMethod* GetResolvedMethod(uint32_t method_idx, ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// This returns the class referred to by GetMethodId(method_idx).class_idx_. This might be
// different than the declaring class of the resolved method due to copied
@@ -315,25 +315,25 @@
mirror::Class* ResolveReferencedClassOfMethod(uint32_t method_idx,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
template <ResolveMode kResolveMode>
ArtMethod* ResolveMethod(Thread* self, uint32_t method_idx, ArtMethod* referrer, InvokeType type)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
ArtMethod* ResolveMethodWithoutInvokeType(const DexFile& dex_file,
uint32_t method_idx,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
ArtField* GetResolvedField(uint32_t field_idx, mirror::Class* field_declaring_class)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtField* GetResolvedField(uint32_t field_idx, mirror::DexCache* dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtField* ResolveField(uint32_t field_idx, ArtMethod* referrer, bool is_static)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
// Resolve a field with a given ID from the DexFile, storing the
@@ -344,7 +344,7 @@
ArtField* ResolveField(const DexFile& dex_file, uint32_t field_idx,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, bool is_static)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
// Resolve a field with a given ID from the DexFile, storing the
@@ -355,12 +355,12 @@
uint32_t field_idx,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
// Get shorty from method index without resolution. Used to do handlerization.
const char* MethodShorty(uint32_t method_idx, ArtMethod* referrer, uint32_t* length)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true on success, false if there's an exception pending.
// can_run_clinit=false allows the compiler to attempt to init a class,
@@ -369,22 +369,22 @@
Handle<mirror::Class> c,
bool can_init_fields,
bool can_init_parents)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
// Initializes classes that have instances in the image but that have
// <clinit> methods so they could not be initialized by the compiler.
void RunRootClinits()
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
mirror::DexCache* RegisterDexFile(const DexFile& dex_file,
mirror::ClassLoader* class_loader)
REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void RegisterDexFile(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const std::vector<const DexFile*>& GetBootClassPath() {
return boot_class_path_;
@@ -392,49 +392,49 @@
void VisitClasses(ClassVisitor* visitor)
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Less efficient variant of VisitClasses that copies the class_table_ into secondary storage
// so that it can visit individual classes without holding the
// Locks::classlinker_classes_lock_. As the Locks::classlinker_classes_lock_ isn't held this code
// can race with insertion and deletion of classes while the visitor is being called.
void VisitClassesWithoutClassesLock(ClassVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
void VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags)
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::DexCache* FindDexCache(Thread* self,
const DexFile& dex_file,
bool allow_failure = false)
REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void FixupDexCaches(ArtMethod* resolution_method)
REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Allocate an instance of a java.lang.Object.
mirror::Object* AllocObject(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
// TODO: replace this with multiple methods that allocate the correct managed type.
template <class T>
mirror::ObjectArray<T>* AllocObjectArray(Thread* self, size_t length)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
mirror::ObjectArray<mirror::Class>* AllocClassArray(Thread* self, size_t length)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
mirror::ObjectArray<mirror::String>* AllocStringArray(Thread* self, size_t length)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
LengthPrefixedArray<ArtField>* AllocArtFieldArray(Thread* self,
@@ -446,33 +446,33 @@
size_t length);
mirror::PointerArray* AllocPointerArray(Thread* self, size_t length)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
mirror::IfTable* AllocIfTable(Thread* self, size_t ifcount)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
mirror::ObjectArray<mirror::StackTraceElement>* AllocStackTraceElementArray(Thread* self,
size_t length)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
void VerifyClass(Thread* self,
Handle<mirror::Class> klass,
LogSeverity log_level = LogSeverity::NONE)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
bool VerifyClassUsingOatFile(const DexFile& dex_file,
mirror::Class* klass,
mirror::Class::Status& oat_file_class_status)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
void ResolveClassExceptionHandlerTypes(Handle<mirror::Class> klass)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
void ResolveMethodExceptionHandlerTypes(ArtMethod* klass)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
mirror::Class* CreateProxyClass(ScopedObjectAccessAlreadyRunnable& soa,
@@ -481,31 +481,31 @@
jobject loader,
jobjectArray methods,
jobjectArray throws)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
std::string GetDescriptorForProxy(mirror::Class* proxy_class)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ArtMethod* FindMethodForProxy(mirror::Class* proxy_class, ArtMethod* proxy_method)
REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get the oat code for a method when its class isn't yet initialized
const void* GetQuickOatCodeFor(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get compiled code for a method, return null if no code
// exists. This is unlike Get..OatCodeFor which will return a bridge
// or interpreter entrypoint.
const void* GetOatMethodQuickCodeFor(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const OatFile::OatMethod FindOatMethodFor(ArtMethod* method, bool* found)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
pid_t GetClassesLockOwner(); // For SignalCatcher.
pid_t GetDexLockOwner(); // For SignalCatcher.
- mirror::Class* GetClassRoot(ClassRoot class_root) SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Class* GetClassRoot(ClassRoot class_root) REQUIRES_SHARED(Locks::mutator_lock_);
static const char* GetClassRootDescriptor(ClassRoot class_root);
@@ -524,20 +524,20 @@
// Set the entrypoints up for method to the given code.
void SetEntryPointsToCompiledCode(ArtMethod* method, const void* method_code) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Set the entrypoints up for method to the enter the interpreter.
void SetEntryPointsToInterpreter(ArtMethod* method) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Attempts to insert a class into a class table. Returns null if
// the class was inserted, otherwise returns an existing class with
// the same descriptor and ClassLoader.
mirror::Class* InsertClass(const char* descriptor, mirror::Class* klass, size_t hash)
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::ObjectArray<mirror::Class>* GetClassRoots() SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::ObjectArray<mirror::Class>* GetClassRoots() REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
DCHECK(class_roots != nullptr);
return class_roots;
@@ -546,25 +546,25 @@
// Move all of the boot image classes into the class table for faster lookups.
void AddBootImageClassesToClassTable()
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Add image classes to the class table.
void AddImageClassesToClassTable(std::vector<gc::space::ImageSpace*> image_spaces,
mirror::ClassLoader* class_loader)
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Move the class table to the pre-zygote table to reduce memory usage. This works by ensuring
// that no more classes are ever added to the pre-zygote table, so that its pages
// always remain shared dirty instead of private dirty.
void MoveClassTableToPreZygote()
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Creates a GlobalRef PathClassLoader that can be used to load classes from the given dex files.
// Note: the objects are not completely set up. Do not use this outside of tests and the compiler.
jobject CreatePathClassLoader(Thread* self, const std::vector<const DexFile*>& dex_files)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
PointerSize GetImagePointerSize() const {
@@ -574,37 +574,37 @@
// Used by image writer for checking.
bool ClassInClassTable(mirror::Class* klass)
REQUIRES(Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* CreateRuntimeMethod(LinearAlloc* linear_alloc);
// Clear the ArrayClass cache. This is necessary when cleaning up for the image, as the cache
// entries are roots, but potentially not image classes.
- void DropFindArrayClassCache() SHARED_REQUIRES(Locks::mutator_lock_);
+ void DropFindArrayClassCache() REQUIRES_SHARED(Locks::mutator_lock_);
// Clean up class loaders, this needs to happen after JNI weak globals are cleared.
void CleanupClassLoaders()
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Unlike GetOrCreateAllocatorForClassLoader, GetAllocatorForClassLoader asserts that the
// allocator for this class loader is already created.
LinearAlloc* GetAllocatorForClassLoader(mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Return the linear alloc for a class loader if it is already allocated, otherwise allocate and
// set it. TODO: Consider using a lock other than classlinker_classes_lock_.
LinearAlloc* GetOrCreateAllocatorForClassLoader(mirror::ClassLoader* class_loader)
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// May be called with null class_loader due to legacy code. b/27954959
void InsertDexFileInToClassLoader(mirror::Object* dex_file, mirror::ClassLoader* class_loader)
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool ShouldUseInterpreterEntrypoint(ArtMethod* method, const void* quick_code)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
std::set<DexCacheResolvedClasses> GetResolvedClasses(bool ignore_boot_classes)
REQUIRES(!dex_lock_);
@@ -615,14 +615,14 @@
static bool IsBootClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* AddMethodToConflictTable(mirror::Class* klass,
ArtMethod* conflict_method,
ArtMethod* interface_method,
ArtMethod* method,
bool force_new_conflict_method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Create a conflict table with a specified capacity.
ImtConflictTable* CreateImtConflictTable(size_t count, LinearAlloc* linear_alloc);
@@ -634,23 +634,23 @@
// Create the IMT and conflict tables for a class.
- void FillIMTAndConflictTables(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ void FillIMTAndConflictTables(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
// Clear class table strong roots (other than classes themselves). This is done by dex2oat to
// allow pruning dex caches.
void ClearClassTableStrongRoots() const
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Throw the class initialization failure recorded when first trying to initialize the given
// class.
void ThrowEarlierClassFailure(mirror::Class* c, bool wrap_in_no_class_def = false)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
// Get the actual holding class for a copied method. Pretty slow, don't call often.
mirror::Class* GetHoldingClassOfCopiedMethod(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
struct DexCacheData {
// Weak root to the DexCache. Note: Do not decode this unnecessarily or else class unloading may
@@ -677,67 +677,67 @@
Handle<mirror::Class> klass,
Handle<mirror::Class> supertype)
REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void DeleteClassLoader(Thread* self, const ClassLoaderData& data)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void VisitClassLoaders(ClassLoaderVisitor* visitor) const
- SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
void VisitClassesInternal(ClassVisitor* visitor)
- SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
// Returns the number of zygote and image classes.
size_t NumZygoteClasses() const
REQUIRES(Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the number of non zygote nor image classes.
size_t NumNonZygoteClasses() const
REQUIRES(Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void FinishInit(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
// For early bootstrapping by Init
mirror::Class* AllocClass(Thread* self, mirror::Class* java_lang_Class, uint32_t class_size)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
// Alloc* convenience functions to avoid needing to pass in mirror::Class*
// values that are known to the ClassLinker such as
// kObjectArrayClass and kJavaLangString etc.
mirror::Class* AllocClass(Thread* self, uint32_t class_size)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
mirror::DexCache* AllocDexCache(Thread* self,
const DexFile& dex_file,
LinearAlloc* linear_alloc)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
mirror::Class* CreatePrimitiveClass(Thread* self, Primitive::Type type)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
mirror::Class* InitializePrimitiveClass(mirror::Class* primitive_class, Primitive::Type type)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
mirror::Class* CreateArrayClass(Thread* self,
const char* descriptor,
size_t hash,
Handle<mirror::ClassLoader> class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
void AppendToBootClassPath(Thread* self, const DexFile& dex_file)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
void AppendToBootClassPath(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
// Precomputes size needed for Class, in the case of a non-temporary class this size must be
@@ -751,86 +751,86 @@
const DexFile::ClassDef& dex_class_def,
Handle<mirror::Class> klass,
mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void LoadClass(Thread* self,
const DexFile& dex_file,
const DexFile::ClassDef& dex_class_def,
Handle<mirror::Class> klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void LoadClassMembers(Thread* self,
const DexFile& dex_file,
const uint8_t* class_data,
Handle<mirror::Class> klass,
const OatFile::OatClass* oat_class)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void LoadField(const ClassDataItemIterator& it, Handle<mirror::Class> klass, ArtField* dst)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void LoadMethod(Thread* self,
const DexFile& dex_file,
const ClassDataItemIterator& it,
Handle<mirror::Class> klass, ArtMethod* dst)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- void FixupStaticTrampolines(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ void FixupStaticTrampolines(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
// Finds the associated oat class for a dex_file and descriptor. Returns an invalid OatClass on
// error and sets found to false.
OatFile::OatClass FindOatClass(const DexFile& dex_file, uint16_t class_def_idx, bool* found)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void RegisterDexFileLocked(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
REQUIRES(dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::DexCache* FindDexCacheLocked(Thread* self, const DexFile& dex_file, bool allow_failure)
REQUIRES(dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool InitializeClass(Thread* self,
Handle<mirror::Class> klass,
bool can_run_clinit,
bool can_init_parents)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
bool InitializeDefaultInterfaceRecursive(Thread* self,
Handle<mirror::Class> klass,
bool can_run_clinit,
bool can_init_parents)
REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool WaitForInitializeClass(Handle<mirror::Class> klass,
Thread* self,
ObjectLock<mirror::Class>& lock);
bool ValidateSuperClassDescriptors(Handle<mirror::Class> klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsSameDescriptorInDifferentClassContexts(Thread* self,
const char* descriptor,
Handle<mirror::ClassLoader> class_loader1,
Handle<mirror::ClassLoader> class_loader2)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsSameMethodSignatureInDifferentClassContexts(Thread* self,
ArtMethod* method,
mirror::Class* klass1,
mirror::Class* klass2)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool LinkClass(Thread* self,
const char* descriptor,
Handle<mirror::Class> klass,
Handle<mirror::ObjectArray<mirror::Class>> interfaces,
MutableHandle<mirror::Class>* h_new_class_out)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::classlinker_classes_lock_);
bool LinkSuperClass(Handle<mirror::Class> klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool LoadSuperAndInterfaces(Handle<mirror::Class> klass, const DexFile& dex_file)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
bool LinkMethods(Thread* self,
@@ -838,12 +838,12 @@
Handle<mirror::ObjectArray<mirror::Class>> interfaces,
bool* out_new_conflict,
ArtMethod** out_imt)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Does anything needed to make sure that the compiler will not generate a direct invoke to this
// method. Should only be called on non-invokable methods.
void EnsureThrowsInvocationError(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// A wrapper class representing the result of a method translation used for linking methods and
// updating superclass default methods. For each method in a class's vtable there are 4 states it
@@ -937,14 +937,14 @@
Thread* self,
Handle<mirror::Class> klass,
/*out*/std::unordered_map<size_t, MethodTranslation>* default_translations)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Sets up the interface lookup table (IFTable) in the correct order to allow searching for
// default methods.
bool SetupInterfaceLookupTable(Thread* self,
Handle<mirror::Class> klass,
Handle<mirror::ObjectArray<mirror::Class>> interfaces)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
enum class DefaultMethodSearchResult {
@@ -976,7 +976,7 @@
ArtMethod* target_method,
Handle<mirror::Class> klass,
/*out*/ArtMethod** out_default_method) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Sets the imt entries and fixes up the vtable for the given class by linking all the interface
// methods. See LinkVirtualMethods for an explanation of what default_translations is.
@@ -986,67 +986,67 @@
const std::unordered_map<size_t, MethodTranslation>& default_translations,
bool* out_new_conflict,
ArtMethod** out_imt)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool LinkStaticFields(Thread* self, Handle<mirror::Class> klass, size_t* class_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool LinkInstanceFields(Thread* self, Handle<mirror::Class> klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_static, size_t* class_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void LinkCode(ArtMethod* method,
const OatFile::OatClass* oat_class,
uint32_t class_def_method_index)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CreateReferenceInstanceOffsets(Handle<mirror::Class> klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CheckProxyConstructor(ArtMethod* constructor) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CheckProxyMethod(ArtMethod* method, ArtMethod* prototype) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// For use by ImageWriter to find DexCaches for its roots
ReaderWriterMutex* DexLock()
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
LOCK_RETURNED(dex_lock_) {
return &dex_lock_;
}
- size_t GetDexCacheCount() SHARED_REQUIRES(Locks::mutator_lock_, dex_lock_) {
+ size_t GetDexCacheCount() REQUIRES_SHARED(Locks::mutator_lock_, dex_lock_) {
return dex_caches_.size();
}
const std::list<DexCacheData>& GetDexCachesData()
- SHARED_REQUIRES(Locks::mutator_lock_, dex_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, dex_lock_) {
return dex_caches_;
}
void CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod* out)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CreateProxyMethod(Handle<mirror::Class> klass, ArtMethod* prototype, ArtMethod* out)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Ensures that methods have the kAccSkipAccessChecks bit set. We use the
// kAccVerificationAttempted bit on the class access flags to determine whether this has been done
// before.
void EnsureSkipAccessChecksMethods(Handle<mirror::Class> c)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Class* LookupClassFromBootImage(const char* descriptor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Register a class loader and create its class table and allocator. Should not be called if
// these are already created.
void RegisterClassLoader(mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::classlinker_classes_lock_);
// Returns null if not found.
ClassTable* ClassTableForClassLoader(mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Insert a new class table if not found.
ClassTable* InsertClassTableForClassLoader(mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::classlinker_classes_lock_);
// EnsureResolved is called to make sure that a class in the class_table_ has been resolved
@@ -1057,24 +1057,24 @@
// the class passed in.
mirror::Class* EnsureResolved(Thread* self, const char* descriptor, mirror::Class* klass)
WARN_UNUSED
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
void FixupTemporaryDeclaringClass(mirror::Class* temp_class, mirror::Class* new_class)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetClassRoot(ClassRoot class_root, mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Return the quick generic JNI stub for testing.
const void* GetRuntimeQuickGenericJniStub() const;
bool CanWeInitializeClass(mirror::Class* klass, bool can_init_statics, bool can_init_parents)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void UpdateClassMethods(mirror::Class* klass,
LengthPrefixedArray<ArtMethod>* new_methods)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::classlinker_classes_lock_);
// new_class_set is the set of classes that were read from the class table section in the image.
@@ -1087,12 +1087,12 @@
bool* out_forward_dex_cache_array,
std::string* out_error_msg)
REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Check that c1 == FindSystemClass(self, descriptor). Abort with class dumps otherwise.
void CheckSystemClass(Thread* self, Handle<mirror::Class> c1, const char* descriptor)
REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Sets imt_ref appropriately for LinkInterfaceMethods.
// If there is no method in the imt location of imt_ref it will store the given method there.
@@ -1102,7 +1102,7 @@
ArtMethod* imt_conflict_method,
ArtMethod* current_method,
/*out*/bool* new_conflict,
- /*out*/ArtMethod** imt_ref) SHARED_REQUIRES(Locks::mutator_lock_);
+ /*out*/ArtMethod** imt_ref) REQUIRES_SHARED(Locks::mutator_lock_);
void FillIMTFromIfTable(mirror::IfTable* if_table,
ArtMethod* unimplemented_method,
@@ -1111,13 +1111,13 @@
bool create_conflict_tables,
bool ignore_copied_methods,
/*out*/bool* new_conflict,
- /*out*/ArtMethod** imt) SHARED_REQUIRES(Locks::mutator_lock_);
+ /*out*/ArtMethod** imt) REQUIRES_SHARED(Locks::mutator_lock_);
void FillImtFromSuperClass(Handle<mirror::Class> klass,
ArtMethod* unimplemented_method,
ArtMethod* imt_conflict_method,
bool* new_conflict,
- ArtMethod** imt) SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod** imt) REQUIRES_SHARED(Locks::mutator_lock_);
std::vector<const DexFile*> boot_class_path_;
std::vector<std::unique_ptr<const DexFile>> boot_dex_files_;
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 5f225be..3be39a1 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -49,7 +49,7 @@
class ClassLinkerTest : public CommonRuntimeTest {
protected:
void AssertNonExistentClass(const std::string& descriptor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
EXPECT_TRUE(class_linker_->FindSystemClass(self, descriptor.c_str()) == nullptr);
EXPECT_TRUE(self->IsExceptionPending());
@@ -61,13 +61,13 @@
}
void AssertPrimitiveClass(const std::string& descriptor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
AssertPrimitiveClass(descriptor, class_linker_->FindSystemClass(self, descriptor.c_str()));
}
void AssertPrimitiveClass(const std::string& descriptor, mirror::Class* primitive)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ASSERT_TRUE(primitive != nullptr);
ASSERT_TRUE(primitive->GetClass() != nullptr);
ASSERT_EQ(primitive->GetClass(), primitive->GetClass()->GetClass());
@@ -103,7 +103,7 @@
}
void AssertObjectClass(mirror::Class* JavaLangObject)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ASSERT_TRUE(JavaLangObject != nullptr);
ASSERT_TRUE(JavaLangObject->GetClass() != nullptr);
ASSERT_EQ(JavaLangObject->GetClass(),
@@ -161,7 +161,7 @@
void AssertArrayClass(const std::string& array_descriptor,
const std::string& component_type,
mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
StackHandleScope<2> hs(self);
Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader));
@@ -175,7 +175,7 @@
}
void AssertArrayClass(const std::string& array_descriptor, Handle<mirror::Class> array)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ASSERT_TRUE(array.Get() != nullptr);
ASSERT_TRUE(array->GetClass() != nullptr);
ASSERT_EQ(array->GetClass(), array->GetClass()->GetClass());
@@ -226,7 +226,7 @@
ASSERT_EQ(JavaLangObject_imt, array->GetImt(pointer_size));
}
- void AssertMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void AssertMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
EXPECT_TRUE(method != nullptr);
EXPECT_TRUE(method->GetDeclaringClass() != nullptr);
EXPECT_TRUE(method->GetName() != nullptr);
@@ -243,7 +243,7 @@
}
void AssertField(mirror::Class* klass, ArtField* field)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
EXPECT_TRUE(field != nullptr);
EXPECT_EQ(klass, field->GetDeclaringClass());
EXPECT_TRUE(field->GetName() != nullptr);
@@ -251,7 +251,7 @@
}
void AssertClass(const std::string& descriptor, Handle<mirror::Class> klass)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::string temp;
EXPECT_STREQ(descriptor.c_str(), klass->GetDescriptor(&temp));
if (descriptor == "Ljava/lang/Object;") {
@@ -397,7 +397,7 @@
}
void AssertDexFileClass(mirror::ClassLoader* class_loader, const std::string& descriptor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ASSERT_TRUE(descriptor != nullptr);
Thread* self = Thread::Current();
StackHandleScope<1> hs(self);
@@ -417,7 +417,7 @@
}
void AssertDexFile(const DexFile& dex, mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Verify all the classes defined in this file
for (size_t i = 0; i < dex.NumClassDefs(); i++) {
const DexFile::ClassDef& class_def = dex.GetClassDef(i);
@@ -464,7 +464,7 @@
std::string class_descriptor;
std::vector<CheckOffset> offsets;
- bool Check() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool Check() REQUIRES_SHARED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
mirror::Class* klass =
Runtime::Current()->GetClassLinker()->FindSystemClass(self, class_descriptor.c_str());
@@ -586,6 +586,8 @@
addOffset(OFFSETOF_MEMBER(mirror::Class, num_reference_static_fields_),
"numReferenceStaticFields");
addOffset(OFFSETOF_MEMBER(mirror::Class, object_size_), "objectSize");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, object_size_alloc_fast_path_),
+ "objectSizeAllocFastPath");
addOffset(OFFSETOF_MEMBER(mirror::Class, primitive_type_), "primitiveType");
addOffset(OFFSETOF_MEMBER(mirror::Class, reference_instance_offsets_),
"referenceInstanceOffsets");
@@ -1158,7 +1160,7 @@
}
static void CheckMethod(ArtMethod* method, bool verified)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!method->IsNative() && !method->IsAbstract()) {
EXPECT_EQ((method->GetAccessFlags() & kAccSkipAccessChecks) != 0U, verified)
<< PrettyMethod(method, true);
@@ -1166,7 +1168,7 @@
}
static void CheckVerificationAttempted(mirror::Class* c, bool preverified)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
EXPECT_EQ((c->GetAccessFlags() & kAccVerificationAttempted) != 0U, preverified)
<< "Class " << PrettyClass(c) << " not as expected";
for (auto& m : c->GetMethods(kRuntimePointerSize)) {
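Alongside the rename, the class_linker_test.cc hunk registers the new mirror::Class field object_size_alloc_fast_path_ in the offset-checking table, keeping the C++ mirror layout in sync with the managed class. A rough standalone sketch of that kind of OFFSETOF_MEMBER-based layout check follows; the struct, field names, and helper are invented stand-ins, not ART's CheckOffsets machinery.

// Illustrative sketch of an addOffset(OFFSETOF_MEMBER(...), "javaName")-style
// layout check; every name below is hypothetical.
#include <cstddef>
#include <cstdio>

struct MirrorClass {                      // stand-in for mirror::Class
  unsigned object_size_;
  unsigned object_size_alloc_fast_path_;  // the newly checked field
  unsigned primitive_type_;
};

#define OFFSETOF_MEMBER(type, member) offsetof(type, member)

struct ExpectedOffset {
  size_t offset;                // offset of the C++ member
  const char* java_field_name;  // name of the matching field in the Java mirror
};

static const ExpectedOffset kClassOffsets[] = {
  {OFFSETOF_MEMBER(MirrorClass, object_size_),                 "objectSize"},
  {OFFSETOF_MEMBER(MirrorClass, object_size_alloc_fast_path_), "objectSizeAllocFastPath"},
  {OFFSETOF_MEMBER(MirrorClass, primitive_type_),              "primitiveType"},
};

// A real test compares each entry against the field offsets reported for the
// managed class; this weaker stand-in only checks the table stays sorted by offset.
bool OffsetsAreSorted() {
  constexpr size_t n = sizeof(kClassOffsets) / sizeof(kClassOffsets[0]);
  for (size_t i = 1; i < n; ++i) {
    if (kClassOffsets[i - 1].offset >= kClassOffsets[i].offset) {
      std::printf("offset table out of order at %s\n", kClassOffsets[i].java_field_name);
      return false;
    }
  }
  return true;
}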
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 6fb4206..66c241f 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -72,13 +72,13 @@
// Used by image writer for checking.
bool Contains(mirror::Class* klass)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Freeze the current class tables by allocating a new table and never updating or modifying the
// existing table. This helps prevent dirty pages caused by inserting after the zygote fork.
void FreezeSnapshot()
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the number of classes in previous snapshots.
size_t NumZygoteClasses() const REQUIRES(!lock_);
@@ -89,74 +89,74 @@
// Update a class in the table with the new class. Returns the existing class which was replaced.
mirror::Class* UpdateClass(const char* descriptor, mirror::Class* new_klass, size_t hash)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// NO_THREAD_SAFETY_ANALYSIS for object marking requiring heap bitmap lock.
template<class Visitor>
void VisitRoots(Visitor& visitor)
NO_THREAD_SAFETY_ANALYSIS
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<class Visitor>
void VisitRoots(const Visitor& visitor)
NO_THREAD_SAFETY_ANALYSIS
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Stops visit if the visitor returns false.
template <typename Visitor>
bool Visit(Visitor& visitor)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Return the first class that matches the descriptor. Returns null if there are none.
mirror::Class* Lookup(const char* descriptor, size_t hash)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Return the first class that matches the descriptor of klass. Returns null if there are none.
mirror::Class* LookupByDescriptor(mirror::Class* klass)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void Insert(mirror::Class* klass)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void InsertWithHash(mirror::Class* klass, size_t hash)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if the class was found and removed, false otherwise.
bool Remove(const char* descriptor)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Return true if we inserted the strong root, false if it already exists.
bool InsertStrongRoot(mirror::Object* obj)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Combines all of the tables into one class set.
size_t WriteToMemory(uint8_t* ptr) const
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Read a table from ptr and put it at the front of the class set.
size_t ReadFromMemory(uint8_t* ptr)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Add a class set to the front of classes.
void AddClassSet(ClassSet&& set)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Clear strong roots (other than classes themselves).
void ClearStrongRoots()
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ReaderWriterMutex& GetLock() {
return lock_;
diff --git a/runtime/code_simulator_container.h b/runtime/code_simulator_container.h
index 655a247..10178ba 100644
--- a/runtime/code_simulator_container.h
+++ b/runtime/code_simulator_container.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_CODE_SIMULATOR_CONTAINER_H_
#include "arch/instruction_set.h"
+#include "base/logging.h"
#include "simulator/code_simulator.h"
namespace art {
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 2d16a49..00394e9 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -114,12 +114,12 @@
std::string GetTestDexFileName(const char* name) const;
std::vector<std::unique_ptr<const DexFile>> OpenTestDexFiles(const char* name)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
std::unique_ptr<const DexFile> OpenTestDexFile(const char* name)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- jobject LoadDex(const char* dex_name) SHARED_REQUIRES(Locks::mutator_lock_);
+ jobject LoadDex(const char* dex_name) REQUIRES_SHARED(Locks::mutator_lock_);
std::string android_data_;
std::string dalvik_cache_;
@@ -195,6 +195,12 @@
DISALLOW_COPY_AND_ASSIGN(CheckJniAbortCatcher);
};
+#define TEST_DISABLED_FOR_TARGET() \
+ if (kIsTargetBuild) { \
+ printf("WARNING: TEST DISABLED FOR TARGET\n"); \
+ return; \
+ }
+
#define TEST_DISABLED_FOR_MIPS() \
if (kRuntimeISA == kMips) { \
printf("WARNING: TEST DISABLED FOR MIPS\n"); \
@@ -213,6 +219,12 @@
return; \
}
+#define TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS() \
+ if (!kHostStaticBuildEnabled) { \
+ printf("WARNING: TEST DISABLED FOR NON-STATIC HOST BUILDS\n"); \
+ return; \
+ }
+
} // namespace art
namespace std {
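
The two guard macros added to common_runtime_test.h above follow the same early-return pattern as the existing TEST_DISABLED_FOR_MIPS guard. Below is a standalone sketch of that pattern, not part of the patch: the constants are stand-ins for flags that ART takes from the build configuration, and the plain function stands in for a gtest body where such guards normally sit on the first lines.

// Sketch: early-return test guards of the kind added in this hunk.
#include <cstdio>

static constexpr bool kIsTargetBuild = false;           // Stand-in for the build flag.
static constexpr bool kHostStaticBuildEnabled = true;   // Stand-in for the build flag.

#define TEST_DISABLED_FOR_TARGET() \
  if (kIsTargetBuild) { \
    printf("WARNING: TEST DISABLED FOR TARGET\n"); \
    return; \
  }

#define TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS() \
  if (!kHostStaticBuildEnabled) { \
    printf("WARNING: TEST DISABLED FOR NON-STATIC HOST BUILDS\n"); \
    return; \
  }

// In real use this body would be a TEST_F(...) in a CommonRuntimeTest-based
// gtest; it is a plain function here so the sketch stands alone.
void HostOnlyTestBody() {
  TEST_DISABLED_FOR_TARGET();                   // Bail out early on device builds.
  TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();   // Bail out without a static host runtime.
  printf("running host-only checks\n");
}

int main() {
  HostOnlyTestBody();
  return 0;
}
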
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index e1da23c..77362a5 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -36,7 +36,7 @@
namespace art {
static void AddReferrerLocation(std::ostream& os, mirror::Class* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (referrer != nullptr) {
std::string location(referrer->GetLocation());
if (!location.empty()) {
@@ -48,7 +48,7 @@
static void ThrowException(const char* exception_descriptor,
mirror::Class* referrer, const char* fmt, va_list* args = nullptr)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostringstream msg;
if (args != nullptr) {
std::string vmsg;
@@ -64,7 +64,7 @@
static void ThrowWrappedException(const char* exception_descriptor,
mirror::Class* referrer, const char* fmt, va_list* args = nullptr)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostringstream msg;
if (args != nullptr) {
std::string vmsg;
@@ -379,7 +379,7 @@
static void ThrowNullPointerExceptionForMethodAccessImpl(uint32_t method_idx,
const DexFile& dex_file,
InvokeType type)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostringstream msg;
msg << "Attempt to invoke " << type << " method '"
<< PrettyMethod(method_idx, dex_file, true) << "' on a null object reference";
@@ -413,7 +413,7 @@
}
static bool IsValidImplicitCheck(uintptr_t addr, ArtMethod* method, const Instruction& instr)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!CanDoImplicitNullCheckOn(addr)) {
return false;
}
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index cbd338d..ab25543 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -34,193 +34,193 @@
// AbstractMethodError
void ThrowAbstractMethodError(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowAbstractMethodError(uint32_t method_idx, const DexFile& dex_file)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// ArithmeticException
-void ThrowArithmeticExceptionDivideByZero() SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+void ThrowArithmeticExceptionDivideByZero() REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// ArrayIndexOutOfBoundsException
void ThrowArrayIndexOutOfBoundsException(int index, int length)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// ArrayStoreException
void ThrowArrayStoreException(mirror::Class* element_class, mirror::Class* array_class)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// ClassCircularityError
void ThrowClassCircularityError(mirror::Class* c)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowClassCircularityError(mirror::Class* c, const char* fmt, ...)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// ClassCastException
void ThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowClassCastException(const char* msg)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// ClassFormatError
void ThrowClassFormatError(mirror::Class* referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// IllegalAccessError
void ThrowIllegalAccessErrorClass(mirror::Class* referrer, mirror::Class* accessed)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirror::Class* accessed,
ArtMethod* called,
InvokeType type)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, ArtMethod* accessed)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessErrorField(mirror::Class* referrer, ArtField* accessed)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessErrorFinalField(ArtMethod* referrer, ArtField* accessed)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// IllegalAccessException
void ThrowIllegalAccessException(const char* msg)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// IllegalArgumentException
void ThrowIllegalArgumentException(const char* msg)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// IncompatibleClassChangeError
void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType found_type,
ArtMethod* method, ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIncompatibleClassChangeErrorClassForInterfaceSuper(ArtMethod* method,
mirror::Class* target_class,
mirror::Object* this_object,
ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(ArtMethod* interface_method,
mirror::Object* this_object,
ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIncompatibleClassChangeErrorField(ArtField* resolved_field, bool is_static,
ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIncompatibleClassChangeError(mirror::Class* referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIncompatibleClassChangeErrorForMethodConflict(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// IOException
void ThrowIOException(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2)))
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowWrappedIOException(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2)))
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// LinkageError
void ThrowLinkageError(mirror::Class* referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowWrappedLinkageError(mirror::Class* referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// NegativeArraySizeException
void ThrowNegativeArraySizeException(int size)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowNegativeArraySizeException(const char* msg)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// NoSuchFieldError
void ThrowNoSuchFieldError(const StringPiece& scope, mirror::Class* c,
const StringPiece& type, const StringPiece& name)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowNoSuchFieldException(mirror::Class* c, const StringPiece& name)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// NoSuchMethodError
void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece& name,
const Signature& signature)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowNoSuchMethodError(uint32_t method_idx)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// NullPointerException
void ThrowNullPointerExceptionForFieldAccess(ArtField* field,
bool is_read)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowNullPointerExceptionForMethodAccess(uint32_t method_idx,
InvokeType type)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowNullPointerExceptionForMethodAccess(ArtMethod* method,
InvokeType type)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowNullPointerExceptionFromDexPC(bool check_address = false, uintptr_t addr = 0)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowNullPointerException(const char* msg)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// RuntimeException
void ThrowRuntimeException(const char* fmt, ...)
__attribute__((__format__(__printf__, 1, 2)))
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// Stack overflow.
-void ThrowStackOverflowError(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+void ThrowStackOverflowError(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// StringIndexOutOfBoundsException
void ThrowStringIndexOutOfBoundsException(int index, int length)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// VerifyError
void ThrowVerifyError(mirror::Class* referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
} // namespace art
diff --git a/runtime/compiler_callbacks.h b/runtime/compiler_callbacks.h
index a39d682..ee797e0 100644
--- a/runtime/compiler_callbacks.h
+++ b/runtime/compiler_callbacks.h
@@ -38,7 +38,7 @@
virtual ~CompilerCallbacks() { }
virtual void MethodVerified(verifier::MethodVerifier* verifier)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
virtual void ClassRejected(ClassReference ref) = 0;
// Return true if we should attempt to relocate to a random base address if we have not already
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index a5b0689..9f3c2aa 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -73,7 +73,7 @@
// copied from some other class). This ensures that the debugger does not get confused as to which
// method we are in.
static ArtMethod* GetCanonicalMethod(ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (LIKELY(!m->IsDefault())) {
return m;
} else {
@@ -95,7 +95,7 @@
deoptimization_kind_ == DeoptimizationRequest::kFullDeoptimization);
}
- Breakpoint(const Breakpoint& other) SHARED_REQUIRES(Locks::mutator_lock_)
+ Breakpoint(const Breakpoint& other) REQUIRES_SHARED(Locks::mutator_lock_)
: method_(other.method_),
dex_pc_(other.dex_pc_),
deoptimization_kind_(other.deoptimization_kind_) {}
@@ -116,7 +116,7 @@
// Returns true if the method of this breakpoint and the passed in method should be considered the
// same. That is, they are either the same method or they are copied from the same method.
- bool IsInMethod(ArtMethod* m) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsInMethod(ArtMethod* m) const REQUIRES_SHARED(Locks::mutator_lock_) {
return method_ == GetCanonicalMethod(m);
}
@@ -130,7 +130,7 @@
};
static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc());
return os;
}
@@ -142,7 +142,7 @@
void MethodEntered(Thread* thread, mirror::Object* this_object, ArtMethod* method,
uint32_t dex_pc)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (method->IsNative()) {
// TODO: posting location events is a suspension point and native method entry stubs aren't.
return;
@@ -168,7 +168,7 @@
void MethodExited(Thread* thread, mirror::Object* this_object, ArtMethod* method,
uint32_t dex_pc, const JValue& return_value)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (method->IsNative()) {
// TODO: posting location events is a suspension point and native method entry stubs aren't.
return;
@@ -185,7 +185,7 @@
void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object ATTRIBUTE_UNUSED,
ArtMethod* method, uint32_t dex_pc)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
// We're not recorded to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
<< " " << dex_pc;
@@ -193,7 +193,7 @@
void DexPcMoved(Thread* thread, mirror::Object* this_object, ArtMethod* method,
uint32_t new_dex_pc)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (IsListeningToMethodExit() && IsReturn(method, new_dex_pc)) {
// We also listen to kMethodExited instrumentation event and the current instruction is a
// RETURN so we know the MethodExited method is going to be called right after us. Like in
@@ -214,25 +214,25 @@
void FieldRead(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc, ArtField* field)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
}
void FieldWritten(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc, ArtField* field,
const JValue& field_value)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
}
void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED, mirror::Throwable* exception_object)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
Dbg::PostException(exception_object);
}
// We only care about branches in the Jit.
void Branch(Thread* /*thread*/, ArtMethod* method, uint32_t dex_pc, int32_t dex_pc_offset)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(ERROR) << "Unexpected branch event in debugger " << PrettyMethod(method)
<< " " << dex_pc << ", " << dex_pc_offset;
}
@@ -243,29 +243,29 @@
ArtMethod* method,
uint32_t dex_pc,
ArtMethod*)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(ERROR) << "Unexpected invoke event in debugger " << PrettyMethod(method)
<< " " << dex_pc;
}
private:
static bool IsReturn(ArtMethod* method, uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = method->GetCodeItem();
const Instruction* instruction = Instruction::At(&code_item->insns_[dex_pc]);
return instruction->IsReturn();
}
- static bool IsListeningToDexPcMoved() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static bool IsListeningToDexPcMoved() REQUIRES_SHARED(Locks::mutator_lock_) {
return IsListeningTo(instrumentation::Instrumentation::kDexPcMoved);
}
- static bool IsListeningToMethodExit() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static bool IsListeningToMethodExit() REQUIRES_SHARED(Locks::mutator_lock_) {
return IsListeningTo(instrumentation::Instrumentation::kMethodExited);
}
static bool IsListeningTo(instrumentation::Instrumentation::InstrumentationEvent event)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return (Dbg::GetInstrumentationEvents() & event) != 0;
}
@@ -329,7 +329,7 @@
static bool IsBreakpoint(ArtMethod* m, uint32_t dex_pc)
REQUIRES(!Locks::breakpoint_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].IsInMethod(m)) {
@@ -349,7 +349,7 @@
}
static mirror::Array* DecodeNonNullArray(JDWP::RefTypeId id, JDWP::JdwpError* error)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
if (o == nullptr) {
*error = JDWP::ERR_INVALID_OBJECT;
@@ -364,7 +364,7 @@
}
static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError* error)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
if (o == nullptr) {
*error = JDWP::ERR_INVALID_OBJECT;
@@ -380,7 +380,7 @@
static Thread* DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id,
JDWP::JdwpError* error)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) {
mirror::Object* thread_peer = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_id, error);
if (thread_peer == nullptr) {
@@ -411,14 +411,14 @@
}
static JDWP::JdwpTag BasicTagFromClass(mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::string temp;
const char* descriptor = klass->GetDescriptor(&temp);
return BasicTagFromDescriptor(descriptor);
}
static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(c != nullptr);
if (c->IsArrayClass()) {
return JDWP::JT_ARRAY;
@@ -822,7 +822,7 @@
OwnedMonitorVisitor(Thread* thread, Context* context,
std::vector<JDWP::ObjectId>* monitor_vector,
std::vector<uint32_t>* stack_depth_vector)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
current_stack_depth(0),
monitors(monitor_vector),
@@ -839,7 +839,7 @@
}
static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg);
visitor->monitors->push_back(gRegistry->Add(owned_monitor));
visitor->stack_depths->push_back(visitor->current_stack_depth);
@@ -1013,7 +1013,7 @@
public:
explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes) : classes_(classes) {}
- bool operator()(mirror::Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool operator()(mirror::Class* c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (!c->IsPrimitive()) {
classes_->push_back(Dbg::GetObjectRegistry()->AddRefType(c));
}
@@ -1333,17 +1333,17 @@
}
static JDWP::MethodId ToMethodId(ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(GetCanonicalMethod(m)));
}
static ArtField* FromFieldId(JDWP::FieldId fid)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return reinterpret_cast<ArtField*>(static_cast<uintptr_t>(fid));
}
static ArtMethod* FromMethodId(JDWP::MethodId mid)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(mid));
}
@@ -1436,7 +1436,7 @@
* the end.
*/
static uint16_t MangleSlot(uint16_t slot, ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = m->GetCodeItem();
if (code_item == nullptr) {
// We should not get here for a method without code (native, proxy or abstract). Log it and
@@ -1454,7 +1454,7 @@
}
static size_t GetMethodNumArgRegistersIncludingThis(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t num_registers = ArtMethod::NumArgRegisters(method->GetShorty());
if (!method->IsStatic()) {
++num_registers;
@@ -1467,7 +1467,7 @@
* slots to dex style argument placement.
*/
static uint16_t DemangleSlot(uint16_t slot, ArtMethod* m, JDWP::JdwpError* error)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = m->GetCodeItem();
if (code_item == nullptr) {
// We should not get here for a method without code (native, proxy or abstract). Log it and
@@ -1617,7 +1617,7 @@
bool with_generic;
static void Callback(void* context, const DexFile::LocalInfo& entry)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
uint16_t slot = entry.reg_;
@@ -1706,7 +1706,7 @@
}
static JValue GetArtFieldValue(ArtField* f, mirror::Object* o)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
JValue field_value;
switch (fieldType) {
@@ -1753,7 +1753,7 @@
static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id,
JDWP::FieldId field_id, JDWP::ExpandBuf* pReply,
bool is_static)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JDWP::JdwpError error;
mirror::Class* c = DecodeClass(ref_type_id, &error);
if (ref_type_id != 0 && c == nullptr) {
@@ -1809,7 +1809,7 @@
}
static JDWP::JdwpError SetArtFieldValue(ArtField* f, mirror::Object* o, uint64_t value, int width)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
// Debugging only happens at runtime so we know we are not running in a transaction.
static constexpr bool kNoTransactionMode = false;
@@ -1880,7 +1880,7 @@
static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
uint64_t value, int width, bool is_static)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JDWP::JdwpError error;
mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error);
if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) {
@@ -2008,7 +2008,7 @@
static mirror::Object* DecodeThreadGroup(ScopedObjectAccessUnchecked& soa,
JDWP::ObjectId thread_group_id, JDWP::JdwpError* error)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* thread_group = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_group_id,
error);
if (*error != JDWP::ERR_NONE) {
@@ -2067,7 +2067,7 @@
static void GetChildThreadGroups(ScopedObjectAccessUnchecked& soa, mirror::Object* thread_group,
std::vector<JDWP::ObjectId>* child_thread_group_ids)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(thread_group != nullptr);
// Get the int "ngroups" count of this thread group...
@@ -2221,7 +2221,7 @@
static bool IsInDesiredThreadGroup(ScopedObjectAccessUnchecked& soa,
mirror::Object* desired_thread_group, mirror::Object* peer)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Do we want threads from all thread groups?
if (desired_thread_group == nullptr) {
return true;
@@ -2265,7 +2265,7 @@
}
}
-static int GetStackDepth(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_) {
+static int GetStackDepth(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_) {
struct CountStackDepthVisitor : public StackVisitor {
explicit CountStackDepthVisitor(Thread* thread_in)
: StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
@@ -2308,7 +2308,7 @@
public:
GetFrameVisitor(Thread* thread, size_t start_frame_in, size_t frame_count_in,
JDWP::ExpandBuf* buf_in)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
depth_(0),
start_frame_(start_frame_in),
@@ -2317,7 +2317,7 @@
expandBufAdd4BE(buf_, frame_count_);
}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (GetMethod()->IsRuntimeMethod()) {
return true; // The debugger can't do anything useful with a frame that has no Method*.
}
@@ -2433,7 +2433,7 @@
struct GetThisVisitor : public StackVisitor {
GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id_in)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
this_object(nullptr),
frame_id(frame_id_in) {}
@@ -2475,7 +2475,7 @@
class FindFrameVisitor FINAL : public StackVisitor {
public:
FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
frame_id_(frame_id),
error_(JDWP::ERR_INVALID_FRAMEID) {}
@@ -2551,14 +2551,14 @@
constexpr JDWP::JdwpError kStackFrameLocalAccessError = JDWP::ERR_ABSENT_INFORMATION;
static std::string GetStackContextAsString(const StackVisitor& visitor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return StringPrintf(" at DEX pc 0x%08x in method %s", visitor.GetDexPc(false),
PrettyMethod(visitor.GetMethod()).c_str());
}
static JDWP::JdwpError FailGetLocalValue(const StackVisitor& visitor, uint16_t vreg,
JDWP::JdwpTag tag)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(ERROR) << "Failed to read " << tag << " local from register v" << vreg
<< GetStackContextAsString(visitor);
return kStackFrameLocalAccessError;
@@ -2720,7 +2720,7 @@
template<typename T>
static JDWP::JdwpError FailSetLocalValue(const StackVisitor& visitor, uint16_t vreg,
JDWP::JdwpTag tag, T value)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(ERROR) << "Failed to write " << tag << " local " << value
<< " (0x" << std::hex << value << ") into register v" << vreg
<< GetStackContextAsString(visitor);
@@ -2814,7 +2814,7 @@
}
static void SetEventLocation(JDWP::EventLocation* location, ArtMethod* m, uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(location != nullptr);
if (m == nullptr) {
memset(location, 0, sizeof(*location));
@@ -2892,7 +2892,7 @@
class CatchLocationFinder : public StackVisitor {
public:
CatchLocationFinder(Thread* self, const Handle<mirror::Throwable>& exception, Context* context)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
exception_(exception),
handle_scope_(self),
@@ -2903,7 +2903,7 @@
throw_dex_pc_(DexFile::kDexNoIndex) {
}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = GetMethod();
DCHECK(method != nullptr);
if (method->IsRuntimeMethod()) {
@@ -2937,15 +2937,15 @@
return true; // Continue stack walk.
}
- ArtMethod* GetCatchMethod() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtMethod* GetCatchMethod() REQUIRES_SHARED(Locks::mutator_lock_) {
return catch_method_;
}
- ArtMethod* GetThrowMethod() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtMethod* GetThrowMethod() REQUIRES_SHARED(Locks::mutator_lock_) {
return throw_method_;
}
- mirror::Object* GetThisAtThrow() SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Object* GetThisAtThrow() REQUIRES_SHARED(Locks::mutator_lock_) {
return this_at_throw_.Get();
}
@@ -3247,7 +3247,7 @@
}
static const Breakpoint* FindFirstBreakpointForMethod(ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::breakpoint_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
for (Breakpoint& breakpoint : gBreakpoints) {
if (breakpoint.IsInMethod(m)) {
return &breakpoint;
@@ -3264,7 +3264,7 @@
// Sanity checks all existing breakpoints on the same method.
static void SanityCheckExistingBreakpoints(ArtMethod* m,
DeoptimizationRequest::Kind deoptimization_kind)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::breakpoint_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
for (const Breakpoint& breakpoint : gBreakpoints) {
if (breakpoint.IsInMethod(m)) {
CHECK_EQ(deoptimization_kind, breakpoint.GetDeoptimizationKind());
@@ -3293,7 +3293,7 @@
static DeoptimizationRequest::Kind GetRequiredDeoptimizationKind(Thread* self,
ArtMethod* m,
const Breakpoint** existing_brkpt)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!Dbg::RequiresDeoptimization()) {
// We already run in interpreter-only mode so we don't need to deoptimize anything.
VLOG(jdwp) << "No need for deoptimization when fully running with interpreter for method "
@@ -3550,11 +3550,11 @@
class NeedsDeoptimizationVisitor : public StackVisitor {
public:
explicit NeedsDeoptimizationVisitor(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
needs_deoptimization_(false) {}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
// The visitor is meant to be used when handling exception from compiled code only.
CHECK(!IsShadowFrame()) << "We only expect to visit compiled frame: " << PrettyMethod(GetMethod());
ArtMethod* method = GetMethod();
@@ -3616,7 +3616,7 @@
public:
ScopedDebuggerThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
REQUIRES(!Locks::thread_list_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) :
+ REQUIRES_SHARED(Locks::mutator_lock_) :
thread_(nullptr),
error_(JDWP::ERR_NONE),
self_suspend_(false),
@@ -3678,7 +3678,7 @@
// Work out what ArtMethod* we're in, the current line number, and how deep the stack currently
// is for step-out.
struct SingleStepStackVisitor : public StackVisitor {
- explicit SingleStepStackVisitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
+ explicit SingleStepStackVisitor(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
stack_depth(0),
method(nullptr),
@@ -4559,7 +4559,7 @@
needHeader_ = false;
}
- void Flush() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Flush() REQUIRES_SHARED(Locks::mutator_lock_) {
if (pieceLenField_ == nullptr) {
// Flush immediately post Reset (maybe back-to-back Flush). Ignore.
CHECK(needHeader_);
@@ -4575,13 +4575,13 @@
}
static void HeapChunkJavaCallback(void* start, void* end, size_t used_bytes, void* arg)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_,
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_,
Locks::mutator_lock_) {
reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkJavaCallback(start, end, used_bytes);
}
static void HeapChunkNativeCallback(void* start, void* end, size_t used_bytes, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkNativeCallback(start, end, used_bytes);
}
@@ -4601,7 +4601,7 @@
}
// Returns true if the object is not an empty chunk.
- bool ProcessRecord(void* start, size_t used_bytes) SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool ProcessRecord(void* start, size_t used_bytes) REQUIRES_SHARED(Locks::mutator_lock_) {
// Note: heap callbacks cannot manipulate the heap upon which they are crawling; care is taken
// in the following code not to allocate memory, by ensuring buf_ is of the correct size
if (used_bytes == 0) {
@@ -4638,7 +4638,7 @@
}
void HeapChunkNativeCallback(void* start, void* /*end*/, size_t used_bytes)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (ProcessRecord(start, used_bytes)) {
uint8_t state = ExamineNativeObject(start);
AppendChunk(state, start, used_bytes + chunk_overhead_, true /*is_native*/);
@@ -4647,7 +4647,7 @@
}
void HeapChunkJavaCallback(void* start, void* /*end*/, size_t used_bytes)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
if (ProcessRecord(start, used_bytes)) {
// Determine the type of this chunk.
// OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
@@ -4659,7 +4659,7 @@
}
void AppendChunk(uint8_t state, void* ptr, size_t length, bool is_native)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Make sure there's enough room left in the buffer.
// We need to use two bytes for every fractional 256 allocation units used by the chunk plus
// 17 bytes for any header.
@@ -4692,12 +4692,12 @@
*p_++ = length - 1;
}
- uint8_t ExamineNativeObject(const void* p) SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint8_t ExamineNativeObject(const void* p) REQUIRES_SHARED(Locks::mutator_lock_) {
return p == nullptr ? HPSG_STATE(SOLIDITY_FREE, 0) : HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
}
uint8_t ExamineJavaObject(mirror::Object* o)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
if (o == nullptr) {
return HPSG_STATE(SOLIDITY_FREE, 0);
}
@@ -4747,7 +4747,7 @@
};
static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
HeapChunkContext::HeapChunkJavaCallback(
obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
@@ -4901,7 +4901,7 @@
};
static const char* GetMethodSourceFile(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(method != nullptr);
const char* source_file = method->GetDeclaringClassSourceFile();
return (source_file != nullptr) ? source_file : "";
diff --git a/runtime/debugger.h b/runtime/debugger.h
index e908304..7398c4e 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -80,7 +80,7 @@
JDWP::ExpandBuf* const reply;
void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
DISALLOW_COPY_AND_ASSIGN(DebugInvokeReq);
@@ -156,15 +156,15 @@
DeoptimizationRequest() : kind_(kNothing), instrumentation_event_(0), method_(nullptr) {}
DeoptimizationRequest(const DeoptimizationRequest& other)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: kind_(other.kind_), instrumentation_event_(other.instrumentation_event_) {
// Create a new JNI global reference for the method.
SetMethod(other.Method());
}
- ArtMethod* Method() const SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod* Method() const REQUIRES_SHARED(Locks::mutator_lock_);
- void SetMethod(ArtMethod* m) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetMethod(ArtMethod* m) REQUIRES_SHARED(Locks::mutator_lock_);
// Name 'Kind()' would collide with the above enum name.
Kind GetKind() const {
@@ -240,7 +240,7 @@
// Returns true if a method has any breakpoints.
static bool MethodHasAnyBreakpoints(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::breakpoint_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::breakpoint_lock_);
static bool IsDisposed() {
return gDisposed;
@@ -260,178 +260,178 @@
* Class, Object, Array
*/
static std::string GetClassName(JDWP::RefTypeId id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static std::string GetClassName(mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId* class_object_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId* superclass_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void GetClassList(std::vector<JDWP::RefTypeId>* classes)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
uint32_t* pStatus, std::string* pDescriptor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>* ids)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetSignature(JDWP::RefTypeId ref_type_id, std::string* signature)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetSourceFile(JDWP::RefTypeId ref_type_id, std::string* source_file)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetObjectTag(JDWP::ObjectId object_id, uint8_t* tag)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static size_t GetTagWidth(JDWP::JdwpTag tag);
static JDWP::JdwpError GetArrayLength(JDWP::ObjectId array_id, int32_t* length)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError OutputArray(JDWP::ObjectId array_id, int offset, int count,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError SetArrayElements(JDWP::ObjectId array_id, int offset, int count,
JDWP::Request* request)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError CreateString(const std::string& str, JDWP::ObjectId* new_string_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId* new_object_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length,
JDWP::ObjectId* new_array_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
//
// Event filtering.
//
static bool MatchThread(JDWP::ObjectId expected_thread_id, Thread* event_thread)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool MatchLocation(const JDWP::JdwpLocation& expected_location,
const JDWP::EventLocation& event_location)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool MatchType(mirror::Class* event_class, JDWP::RefTypeId class_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool MatchField(JDWP::RefTypeId expected_type_id, JDWP::FieldId expected_field_id,
ArtField* event_field)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* event_instance)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
//
// Monitors.
//
static JDWP::JdwpError GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetOwnedMonitors(JDWP::ObjectId thread_id,
std::vector<JDWP::ObjectId>* monitors,
std::vector<uint32_t>* stack_depths)
- REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetContendedMonitor(JDWP::ObjectId thread_id,
JDWP::ObjectId* contended_monitor)
- REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
//
// Heap.
//
static JDWP::JdwpError GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
std::vector<uint64_t>* counts)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetInstances(JDWP::RefTypeId class_id, int32_t max_count,
std::vector<JDWP::ObjectId>* instances)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
std::vector<JDWP::ObjectId>* referring_objects)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError DisableCollection(JDWP::ObjectId object_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError EnableCollection(JDWP::ObjectId object_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError IsCollected(JDWP::ObjectId object_id, bool* is_collected)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
//
// Methods and fields.
//
static std::string GetMethodName(JDWP::MethodId method_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError OutputDeclaredFields(JDWP::RefTypeId ref_type_id, bool with_generic,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError OutputDeclaredMethods(JDWP::RefTypeId ref_type_id, bool with_generic,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError OutputDeclaredInterfaces(JDWP::RefTypeId ref_type_id,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void OutputLineTable(JDWP::RefTypeId ref_type_id, JDWP::MethodId method_id,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void OutputVariableTable(JDWP::RefTypeId ref_type_id, JDWP::MethodId id, bool with_generic,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetBytecodes(JDWP::RefTypeId class_id, JDWP::MethodId method_id,
std::vector<uint8_t>* bytecodes)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static std::string GetFieldName(JDWP::FieldId field_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpTag GetFieldBasicTag(JDWP::FieldId field_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpTag GetStaticFieldBasicTag(JDWP::FieldId field_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
uint64_t value, int width)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError StringToUtf8(JDWP::ObjectId string_id, std::string* str)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Thread, ThreadGroup, Frame
*/
static JDWP::JdwpError GetThreadName(JDWP::ObjectId thread_id, std::string* name)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::thread_list_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::thread_list_lock_);
static JDWP::JdwpError GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::thread_list_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::thread_list_lock_);
static JDWP::JdwpError GetThreadGroupName(JDWP::ObjectId thread_group_id,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetThreadGroupParent(JDWP::ObjectId thread_group_id,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetThreadGroupChildren(JDWP::ObjectId thread_group_id,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::ObjectId GetSystemThreadGroupId()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpThreadStatus ToJdwpThreadStatus(ThreadState state);
static JDWP::JdwpError GetThreadStatus(JDWP::ObjectId thread_id,
@@ -446,16 +446,16 @@
// Fills 'thread_ids' with the threads in the given thread group. If thread_group_id == 0,
// returns all threads.
static void GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* thread_ids)
- REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* result)
REQUIRES(!Locks::thread_list_lock_);
static JDWP::JdwpError GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
size_t frame_count, JDWP::ExpandBuf* buf)
- REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
- static JDWP::ObjectId GetThreadSelfId() SHARED_REQUIRES(Locks::mutator_lock_);
- static JDWP::ObjectId GetThreadId(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_);
+ static JDWP::ObjectId GetThreadSelfId() REQUIRES_SHARED(Locks::mutator_lock_);
+ static JDWP::ObjectId GetThreadId(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_);
static void SuspendVM()
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
@@ -467,17 +467,17 @@
static void ResumeThread(JDWP::ObjectId thread_id)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void SuspendSelf();
static JDWP::JdwpError GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
JDWP::ObjectId* result)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pReply)
- REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError SetLocalValues(JDWP::Request* request)
- REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError Interrupt(JDWP::ObjectId thread_id)
REQUIRES(!Locks::thread_list_lock_);
@@ -493,42 +493,42 @@
};
static void PostFieldAccessEvent(ArtMethod* m, int dex_pc, mirror::Object* this_object,
ArtField* f)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void PostFieldModificationEvent(ArtMethod* m, int dex_pc,
mirror::Object* this_object, ArtField* f,
const JValue* field_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void PostException(mirror::Throwable* exception)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void PostThreadStart(Thread* t)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void PostThreadDeath(Thread* t)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void PostClassPrepare(mirror::Class* c)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void UpdateDebugger(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t new_dex_pc,
int event_flags, const JValue* return_value)
- REQUIRES(!Locks::breakpoint_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::breakpoint_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
// Indicates whether we need deoptimization for debugging.
static bool RequiresDeoptimization();
// Records deoptimization request in the queue.
static void RequestDeoptimization(const DeoptimizationRequest& req)
- REQUIRES(!Locks::deoptimization_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::deoptimization_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
// Manage deoptimization after updating JDWP events list. Suspends all threads, processes each
// request and finally resumes all threads.
static void ManageDeoptimization()
- REQUIRES(!Locks::deoptimization_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::deoptimization_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
// Breakpoints.
static void WatchLocation(const JDWP::JdwpLocation* pLoc, DeoptimizationRequest* req)
- REQUIRES(!Locks::breakpoint_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::breakpoint_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
static void UnwatchLocation(const JDWP::JdwpLocation* pLoc, DeoptimizationRequest* req)
- REQUIRES(!Locks::breakpoint_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::breakpoint_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Forced interpreter checkers for single-step and continue support.
@@ -537,7 +537,7 @@
// Indicates whether we need to force the use of interpreter to invoke a method.
// This allows single-stepping or continuing into the called method.
static bool IsForcedInterpreterNeededForCalling(Thread* thread, ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!IsDebuggerActive()) {
return false;
}
@@ -548,7 +548,7 @@
// method through the resolution trampoline. This allows single-stepping or continuing into
// the called method.
static bool IsForcedInterpreterNeededForResolution(Thread* thread, ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!IsDebuggerActive()) {
return false;
}
@@ -559,7 +559,7 @@
// a method through the resolution trampoline. This allows deoptimizing the stack for
// debugging when we return from the called method.
static bool IsForcedInstrumentationNeededForResolution(Thread* thread, ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!IsDebuggerActive()) {
return false;
}
@@ -570,7 +570,7 @@
// interpreter into the runtime. This allows deoptimizing the stack and continuing
// execution with the interpreter for debugging.
static bool IsForcedInterpreterNeededForUpcall(Thread* thread, ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!IsDebuggerActive() && !thread->HasDebuggerShadowFrames()) {
return false;
}
@@ -583,7 +583,7 @@
// Note: the interpreter will start by handling the exception when executing
// the deoptimized frames.
static bool IsForcedInterpreterNeededForException(Thread* thread)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!IsDebuggerActive() && !thread->HasDebuggerShadowFrames()) {
return false;
}
@@ -593,9 +593,9 @@
// Single-stepping.
static JDWP::JdwpError ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize size,
JDWP::JdwpStepDepth depth)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void UnconfigureStep(JDWP::ObjectId thread_id)
- REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Invoke support
@@ -616,7 +616,7 @@
uint64_t arg_values[], JDWP::JdwpTag* arg_types,
uint32_t options)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Called by the event thread to execute a method prepared by the JDWP thread in the given
// DebugInvokeReq object. Once the invocation completes, the event thread attaches a reply
@@ -633,29 +633,29 @@
* DDM support.
*/
static void DdmSendThreadNotification(Thread* t, uint32_t type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void DdmSetThreadNotification(bool enable)
REQUIRES(!Locks::thread_list_lock_);
static bool DdmHandlePacket(JDWP::Request* request, uint8_t** pReplyBuf, int* pReplyLen);
- static void DdmConnected() SHARED_REQUIRES(Locks::mutator_lock_);
- static void DdmDisconnected() SHARED_REQUIRES(Locks::mutator_lock_);
+ static void DdmConnected() REQUIRES_SHARED(Locks::mutator_lock_);
+ static void DdmDisconnected() REQUIRES_SHARED(Locks::mutator_lock_);
static void DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void DdmSendChunk(uint32_t type, size_t len, const uint8_t* buf)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Visit breakpoint roots, used to prevent unloading of methods with breakpoints.
static void VisitRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Allocation tracking support.
*/
static void SetAllocTrackingEnabled(bool enabled) REQUIRES(!Locks::alloc_tracker_lock_);
static jbyteArray GetRecentAllocations()
- REQUIRES(!Locks::alloc_tracker_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::alloc_tracker_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
static void DumpRecentAllocations() REQUIRES(!Locks::alloc_tracker_lock_);
enum HpifWhen {
@@ -665,7 +665,7 @@
HPIF_WHEN_EVERY_GC = 3
};
static int DdmHandleHpifChunk(HpifWhen when)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
enum HpsgWhen {
HPSG_WHEN_NEVER = 0,
@@ -678,79 +678,79 @@
static bool DdmHandleHpsgNhsgChunk(HpsgWhen when, HpsgWhat what, bool native);
static void DdmSendHeapInfo(HpifWhen reason)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void DdmSendHeapSegments(bool native)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static ObjectRegistry* GetObjectRegistry() {
return gRegistry;
}
static JDWP::JdwpTag TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpTypeTag GetTypeTag(mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::FieldId ToFieldId(const ArtField* f)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
static JDWP::JdwpState* GetJdwpState();
- static uint32_t GetInstrumentationEvents() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static uint32_t GetInstrumentationEvents() REQUIRES_SHARED(Locks::mutator_lock_) {
return instrumentation_events_;
}
private:
static void ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInvokeReq* pReq)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void BuildInvokeReply(JDWP::ExpandBuf* pReply, uint32_t request_id,
JDWP::JdwpTag result_tag, uint64_t result_value,
JDWP::ObjectId exception)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetLocalValue(const StackVisitor& visitor,
ScopedObjectAccessUnchecked& soa, int slot,
JDWP::JdwpTag tag, uint8_t* buf, size_t width)
- REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError SetLocalValue(Thread* thread, StackVisitor& visitor, int slot,
JDWP::JdwpTag tag, uint64_t value, size_t width)
- REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
- static void DdmBroadcast(bool connect) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void DdmBroadcast(bool connect) REQUIRES_SHARED(Locks::mutator_lock_);
static void PostThreadStartOrStop(Thread*, uint32_t)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void PostLocationEvent(ArtMethod* method, int pcOffset,
mirror::Object* thisPtr, int eventFlags,
const JValue* return_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void ProcessDeoptimizationRequest(const DeoptimizationRequest& request)
REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_);
static void RequestDeoptimizationLocked(const DeoptimizationRequest& req)
- REQUIRES(Locks::deoptimization_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::deoptimization_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
static bool IsForcedInterpreterNeededForCallingImpl(Thread* thread, ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool IsForcedInterpreterNeededForResolutionImpl(Thread* thread, ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool IsForcedInterpreterNeededForExceptionImpl(Thread* thread)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Indicates whether the debugger is making requests.
static bool gDebuggerActive;
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 90c678c..ebadd79 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -42,6 +42,7 @@
#include "dex_file_verifier.h"
#include "globals.h"
#include "handle_scope-inl.h"
+#include "jvalue.h"
#include "leb128.h"
#include "mirror/field.h"
#include "mirror/method.h"
@@ -68,6 +69,11 @@
{'0', '3', '8', '\0'}
};
+struct DexFile::AnnotationValue {
+ JValue value_;
+ uint8_t type_;
+};
+
bool DexFile::GetChecksum(const char* filename, uint32_t* checksum, std::string* error_msg) {
CHECK(checksum != nullptr);
uint32_t magic;
@@ -222,6 +228,10 @@
nullptr,
oat_dex_file,
error_msg);
+ if (dex_file == nullptr) {
+ return nullptr;
+ }
+
if (verify && !DexFileVerifier::Verify(dex_file.get(),
dex_file->Begin(),
dex_file->Size(),
@@ -230,7 +240,32 @@
error_msg)) {
return nullptr;
}
+ return dex_file;
+}
+std::unique_ptr<const DexFile> DexFile::Open(const std::string& location,
+ uint32_t location_checksum,
+ std::unique_ptr<MemMap> mem_map,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg) {
+ ScopedTrace trace(std::string("Open dex file from mapped-memory ") + location);
+ std::unique_ptr<const DexFile> dex_file = OpenMemory(location,
+ location_checksum,
+ std::move(mem_map),
+ error_msg);
+ if (dex_file == nullptr) {
+ return nullptr;
+ }
+
+ if (verify && !DexFileVerifier::Verify(dex_file.get(),
+ dex_file->Begin(),
+ dex_file->Size(),
+ location.c_str(),
+ verify_checksum,
+ error_msg)) {
+ return nullptr;
+ }
return dex_file;
}
@@ -263,7 +298,7 @@
/*low_4gb*/false,
location,
error_msg));
- if (map.get() == nullptr) {
+ if (map == nullptr) {
DCHECK(!error_msg->empty());
return nullptr;
}
@@ -277,7 +312,9 @@
const Header* dex_header = reinterpret_cast<const Header*>(map->Begin());
- std::unique_ptr<const DexFile> dex_file(OpenMemory(location, dex_header->checksum_, map.release(),
+ std::unique_ptr<const DexFile> dex_file(OpenMemory(location,
+ dex_header->checksum_,
+ std::move(map),
error_msg));
if (dex_file.get() == nullptr) {
*error_msg = StringPrintf("Failed to open dex file '%s' from memory: %s", location,
@@ -314,13 +351,13 @@
std::unique_ptr<const DexFile> DexFile::OpenMemory(const std::string& location,
uint32_t location_checksum,
- MemMap* mem_map,
+ std::unique_ptr<MemMap> mem_map,
std::string* error_msg) {
return OpenMemory(mem_map->Begin(),
mem_map->Size(),
location,
location_checksum,
- mem_map,
+ std::move(mem_map),
nullptr,
error_msg);
}
@@ -350,9 +387,11 @@
*error_code = ZipOpenErrorCode::kExtractToMemoryError;
return nullptr;
}
- std::unique_ptr<const DexFile> dex_file(OpenMemory(location, zip_entry->GetCrc32(), map.release(),
- error_msg));
- if (dex_file.get() == nullptr) {
+ std::unique_ptr<const DexFile> dex_file(OpenMemory(location,
+ zip_entry->GetCrc32(),
+ std::move(map),
+ error_msg));
+ if (dex_file == nullptr) {
*error_msg = StringPrintf("Failed to open dex file '%s' from memory: %s", location.c_str(),
error_msg->c_str());
*error_code = ZipOpenErrorCode::kDexFileError;
@@ -437,14 +476,14 @@
size_t size,
const std::string& location,
uint32_t location_checksum,
- MemMap* mem_map,
+ std::unique_ptr<MemMap> mem_map,
const OatDexFile* oat_dex_file,
std::string* error_msg) {
DCHECK(base != nullptr);
DCHECK_NE(size, 0U);
CHECK_ALIGNED(base, 4); // various dex file structures must be word aligned
std::unique_ptr<DexFile> dex_file(
- new DexFile(base, size, location, location_checksum, mem_map, oat_dex_file));
+ new DexFile(base, size, location, location_checksum, std::move(mem_map), oat_dex_file));
if (!dex_file->Init(error_msg)) {
dex_file.reset();
}
@@ -454,13 +493,13 @@
DexFile::DexFile(const uint8_t* base, size_t size,
const std::string& location,
uint32_t location_checksum,
- MemMap* mem_map,
+ std::unique_ptr<MemMap> mem_map,
const OatDexFile* oat_dex_file)
: begin_(base),
size_(size),
location_(location),
location_checksum_(location_checksum),
- mem_map_(mem_map),
+ mem_map_(std::move(mem_map)),
header_(reinterpret_cast<const Header*>(base)),
string_ids_(reinterpret_cast<const StringId*>(base + header_->string_ids_off_)),
type_ids_(reinterpret_cast<const TypeId*>(base + header_->type_ids_off_)),
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 59339ef..ebbde0a 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -19,7 +19,6 @@
#include <memory>
#include <string>
-#include <unordered_map>
#include <vector>
#include "base/logging.h"
@@ -28,7 +27,6 @@
#include "globals.h"
#include "invoke_type.h"
#include "jni.h"
-#include "jvalue.h"
#include "mirror/object_array.h"
#include "modifiers.h"
#include "utf.h"
@@ -398,11 +396,6 @@
DISALLOW_COPY_AND_ASSIGN(AnnotationItem);
};
- struct AnnotationValue {
- JValue value_;
- uint8_t type_;
- };
-
enum AnnotationResultStyle { // private
kAllObjects,
kPrimitivesOrObjects,
@@ -422,10 +415,6 @@
std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files);
- // Checks whether the given file has the dex magic, or is a zip file with a classes.dex entry.
- // If this function returns false, Open will not succeed. The inverse is not true, however.
- static bool MaybeDex(const char* filename);
-
// Opens .dex file, backed by existing memory
static std::unique_ptr<const DexFile> Open(const uint8_t* base, size_t size,
const std::string& location,
@@ -435,6 +424,18 @@
bool verify_checksum,
std::string* error_msg);
+ // Opens .dex file that has been memory-mapped by the caller.
+ static std::unique_ptr<const DexFile> Open(const std::string& location,
+ uint32_t location_checksum,
+ std::unique_ptr<MemMap> mem_map,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg);
+
+ // Checks whether the given file has the dex magic, or is a zip file with a classes.dex entry.
+ // If this function returns false, Open will not succeed. The inverse is not true, however.
+ static bool MaybeDex(const char* filename);
+
// Open all classesXXX.dex files from a zip archive.
static bool OpenFromZip(const ZipArchive& zip_archive,
const std::string& location,
@@ -934,108 +935,104 @@
}
const AnnotationSetItem* FindAnnotationSetForField(ArtField* field) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* GetAnnotationForField(ArtField* field, Handle<mirror::Class> annotation_class)
- const SHARED_REQUIRES(Locks::mutator_lock_);
+ const REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* GetAnnotationsForField(ArtField* field) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForField(ArtField* field) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsFieldAnnotationPresent(ArtField* field, Handle<mirror::Class> annotation_class) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const AnnotationSetItem* FindAnnotationSetForMethod(ArtMethod* method) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const ParameterAnnotationsItem* FindAnnotationsItemForMethod(ArtMethod* method) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* GetAnnotationDefaultValue(ArtMethod* method) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* GetAnnotationForMethod(ArtMethod* method, Handle<mirror::Class> annotation_class)
- const SHARED_REQUIRES(Locks::mutator_lock_);
+ const REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* GetAnnotationsForMethod(ArtMethod* method) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Class>* GetExceptionTypesForMethod(ArtMethod* method) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* GetParameterAnnotations(ArtMethod* method) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForMethod(ArtMethod* method) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsMethodAnnotationPresent(ArtMethod* method,
Handle<mirror::Class> annotation_class,
uint32_t visibility = kDexVisibilityRuntime) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const AnnotationSetItem* FindAnnotationSetForClass(Handle<mirror::Class> klass) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* GetAnnotationForClass(Handle<mirror::Class> klass,
Handle<mirror::Class> annotation_class) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* GetAnnotationsForClass(Handle<mirror::Class> klass) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Class>* GetDeclaredClasses(Handle<mirror::Class> klass) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Class* GetDeclaringClass(Handle<mirror::Class> klass) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Class* GetEnclosingClass(Handle<mirror::Class> klass) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* GetEnclosingMethod(Handle<mirror::Class> klass) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool GetInnerClass(Handle<mirror::Class> klass, mirror::String** name) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool GetInnerClassFlags(Handle<mirror::Class> klass, uint32_t* flags) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForClass(Handle<mirror::Class> klass)
- const SHARED_REQUIRES(Locks::mutator_lock_);
+ const REQUIRES_SHARED(Locks::mutator_lock_);
bool IsClassAnnotationPresent(Handle<mirror::Class> klass, Handle<mirror::Class> annotation_class)
- const SHARED_REQUIRES(Locks::mutator_lock_);
+ const REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* CreateAnnotationMember(Handle<mirror::Class> klass,
Handle<mirror::Class> annotation_class,
const uint8_t** annotation) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const AnnotationItem* GetAnnotationItemFromAnnotationSet(Handle<mirror::Class> klass,
const AnnotationSetItem* annotation_set,
uint32_t visibility,
Handle<mirror::Class> annotation_class)
- const SHARED_REQUIRES(Locks::mutator_lock_);
+ const REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* GetAnnotationObjectFromAnnotationSet(Handle<mirror::Class> klass,
const AnnotationSetItem* annotation_set,
uint32_t visibility,
Handle<mirror::Class> annotation_class) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* GetAnnotationValue(Handle<mirror::Class> klass,
const AnnotationItem* annotation_item,
const char* annotation_name,
Handle<mirror::Class> array_class,
uint32_t expected_type) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::String>* GetSignatureValue(Handle<mirror::Class> klass,
const AnnotationSetItem* annotation_set)
- const SHARED_REQUIRES(Locks::mutator_lock_);
+ const REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Class>* GetThrowsValue(Handle<mirror::Class> klass,
const AnnotationSetItem* annotation_set) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* ProcessAnnotationSet(Handle<mirror::Class> klass,
const AnnotationSetItem* annotation_set,
uint32_t visibility) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* ProcessAnnotationSetRefList(Handle<mirror::Class> klass,
const AnnotationSetRefList* set_ref_list, uint32_t size) const
- SHARED_REQUIRES(Locks::mutator_lock_);
- bool ProcessAnnotationValue(Handle<mirror::Class> klass, const uint8_t** annotation_ptr,
- AnnotationValue* annotation_value, Handle<mirror::Class> return_class,
- DexFile::AnnotationResultStyle result_style) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* ProcessEncodedAnnotation(Handle<mirror::Class> klass,
const uint8_t** annotation) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const AnnotationItem* SearchAnnotationSet(const AnnotationSetItem* annotation_set,
const char* descriptor, uint32_t visibility) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const uint8_t* SearchEncodedAnnotation(const uint8_t* annotation, const char* name) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool SkipAnnotationValue(const uint8_t** annotation_ptr) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Debug info opcodes and constants
enum {
@@ -1072,7 +1069,7 @@
//
// This is used by runtime; therefore use art::Method not art::DexFile::Method.
int32_t GetLineNumFromPC(ArtMethod* method, uint32_t rel_pc) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns false if there is no debugging information or if it cannot be decoded.
bool DecodeDebugLocalInfo(const CodeItem* code_item, bool is_static, uint32_t method_idx,
@@ -1175,7 +1172,7 @@
// Opens a .dex file at the given address backed by a MemMap
static std::unique_ptr<const DexFile> OpenMemory(const std::string& location,
uint32_t location_checksum,
- MemMap* mem_map,
+ std::unique_ptr<MemMap> mem_map,
std::string* error_msg);
// Opens a .dex file at the given address, optionally backed by a MemMap
@@ -1183,14 +1180,14 @@
size_t size,
const std::string& location,
uint32_t location_checksum,
- MemMap* mem_map,
+ std::unique_ptr<MemMap> mem_map,
const OatDexFile* oat_dex_file,
std::string* error_msg);
DexFile(const uint8_t* base, size_t size,
const std::string& location,
uint32_t location_checksum,
- MemMap* mem_map,
+ std::unique_ptr<MemMap> mem_map,
const OatDexFile* oat_dex_file);
// Top-level initializer that calls other Init methods.
@@ -1203,6 +1200,12 @@
// whether the string contains the separator character.
static bool IsMultiDexLocation(const char* location);
+ struct AnnotationValue;
+
+ bool ProcessAnnotationValue(Handle<mirror::Class> klass, const uint8_t** annotation_ptr,
+ AnnotationValue* annotation_value, Handle<mirror::Class> return_class,
+ DexFile::AnnotationResultStyle result_style) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
// The base address of the memory mapping.
const uint8_t* const begin_;
@@ -1518,10 +1521,10 @@
Handle<mirror::ClassLoader>* class_loader,
ClassLinker* linker,
const DexFile::ClassDef& class_def)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void ReadValueToField(ArtField* field) const SHARED_REQUIRES(Locks::mutator_lock_);
+ void ReadValueToField(ArtField* field) const REQUIRES_SHARED(Locks::mutator_lock_);
bool HasNext() const { return pos_ < array_size_; }
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 2704d8a..6a06177 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -22,6 +22,7 @@
#include "base/unix_file/fd_file.h"
#include "common_runtime_test.h"
#include "dex_file-inl.h"
+#include "mem_map.h"
#include "os.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
@@ -61,7 +62,7 @@
255, 255, 255, 255
};
-static inline uint8_t* DecodeBase64(const char* src, size_t* dst_size) {
+static inline std::vector<uint8_t> DecodeBase64(const char* src) {
std::vector<uint8_t> tmp;
uint32_t t = 0, y = 0;
int g = 3;
@@ -73,13 +74,11 @@
c = 0;
// prevent g < 0 which would potentially allow an overflow later
if (--g < 0) {
- *dst_size = 0;
- return nullptr;
+ return std::vector<uint8_t>();
}
} else if (g != 3) {
// we only allow = to be at the end
- *dst_size = 0;
- return nullptr;
+ return std::vector<uint8_t>();
}
t = (t << 6) | c;
if (++y == 4) {
@@ -94,17 +93,9 @@
}
}
if (y != 0) {
- *dst_size = 0;
- return nullptr;
+ return std::vector<uint8_t>();
}
- std::unique_ptr<uint8_t[]> dst(new uint8_t[tmp.size()]);
- if (dst_size != nullptr) {
- *dst_size = tmp.size();
- } else {
- *dst_size = 0;
- }
- std::copy(tmp.begin(), tmp.end(), dst.get());
- return dst.release();
+ return tmp;
}
// Although this is the same content logically as the Nested test dex,
@@ -175,14 +166,13 @@
static void DecodeAndWriteDexFile(const char* base64, const char* location) {
// decode base64
CHECK(base64 != nullptr);
- size_t length;
- std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(base64, &length));
- CHECK(dex_bytes.get() != nullptr);
+ std::vector<uint8_t> dex_bytes = DecodeBase64(base64);
+ CHECK_NE(dex_bytes.size(), 0u);
// write to provided file
std::unique_ptr<File> file(OS::CreateEmptyFile(location));
CHECK(file.get() != nullptr);
- if (!file->WriteFully(dex_bytes.get(), length)) {
+ if (!file->WriteFully(dex_bytes.data(), dex_bytes.size())) {
PLOG(FATAL) << "Failed to write base64 as dex file";
}
if (file->FlushCloseOrErase() != 0) {
@@ -208,15 +198,47 @@
return dex_file;
}
-TEST_F(DexFileTest, Header) {
- ScratchFile tmp;
- std::unique_ptr<const DexFile> raw(OpenDexFileBase64(kRawDex, tmp.GetFilename().c_str()));
- ASSERT_TRUE(raw.get() != nullptr);
+static std::unique_ptr<const DexFile> OpenDexFileInMemoryBase64(const char* base64,
+ const char* location,
+ uint32_t location_checksum) {
+ CHECK(base64 != nullptr);
+ std::vector<uint8_t> dex_bytes = DecodeBase64(base64);
+ CHECK_NE(dex_bytes.size(), 0u);
- const DexFile::Header& header = raw->GetHeader();
- // TODO: header.magic_
+ std::string error_message;
+ std::unique_ptr<MemMap> region(MemMap::MapAnonymous("test-region",
+ nullptr,
+ dex_bytes.size(),
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_message));
+ memcpy(region->Begin(), dex_bytes.data(), dex_bytes.size());
+ std::unique_ptr<const DexFile> dex_file(DexFile::Open(location,
+ location_checksum,
+ std::move(region),
+ /* verify */ true,
+ /* verify_checksum */ true,
+ &error_message));
+ CHECK(dex_file != nullptr) << error_message;
+ return dex_file;
+}
+
+static void ValidateDexFileHeader(std::unique_ptr<const DexFile> dex_file) {
+ static const uint8_t kExpectedDexFileMagic[8] = {
+ /* d */ 0x64, /* e */ 0x65, /* x */ 0x78, /* \n */ 0x0a,
+ /* 0 */ 0x30, /* 3 */ 0x33, /* 5 */ 0x35, /* \0 */ 0x00
+ };
+ static const uint8_t kExpectedSha1[DexFile::kSha1DigestSize] = {
+ 0x7b, 0xb8, 0x0c, 0xd4, 0x1f, 0xd6, 0x1e, 0xc5,
+ 0x89, 0xe8, 0xbe, 0xe5, 0x18, 0x02, 0x12, 0x18,
+ 0x2e, 0xf2, 0x8c, 0x3d,
+ };
+
+ const DexFile::Header& header = dex_file->GetHeader();
+ EXPECT_EQ(*kExpectedDexFileMagic, *header.magic_);
EXPECT_EQ(0x00d87910U, header.checksum_);
- // TODO: header.signature_
+ EXPECT_EQ(*kExpectedSha1, *header.signature_);
EXPECT_EQ(904U, header.file_size_);
EXPECT_EQ(112U, header.header_size_);
EXPECT_EQ(0U, header.link_size_);
@@ -236,7 +258,20 @@
EXPECT_EQ(584U, header.data_size_);
EXPECT_EQ(320U, header.data_off_);
- EXPECT_EQ(header.checksum_, raw->GetLocationChecksum());
+ EXPECT_EQ(header.checksum_, dex_file->GetLocationChecksum());
+}
+
+TEST_F(DexFileTest, Header) {
+ ScratchFile tmp;
+ std::unique_ptr<const DexFile> raw(OpenDexFileBase64(kRawDex, tmp.GetFilename().c_str()));
+ ValidateDexFileHeader(std::move(raw));
+}
+
+TEST_F(DexFileTest, HeaderInMemory) {
+ ScratchFile tmp;
+ std::unique_ptr<const DexFile> raw =
+ OpenDexFileInMemoryBase64(kRawDex, tmp.GetFilename().c_str(), 0x00d87910U);
+ ValidateDexFileHeader(std::move(raw));
}
TEST_F(DexFileTest, Version38Accepted) {
diff --git a/runtime/dex_instruction_utils.h b/runtime/dex_instruction_utils.h
index 2849cd8..72d8244 100644
--- a/runtime/dex_instruction_utils.h
+++ b/runtime/dex_instruction_utils.h
@@ -134,74 +134,54 @@
return Instruction::ADD_INT_2ADDR <= code && code <= Instruction::REM_DOUBLE_2ADDR;
}
-// TODO: Remove the #if guards below when we fully migrate to C++14.
-
constexpr bool IsInvokeInstructionRange(Instruction::Code opcode) {
-#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
DCHECK(IsInstructionInvoke(opcode));
-#endif
return opcode >= Instruction::INVOKE_VIRTUAL_RANGE;
}
constexpr DexInvokeType InvokeInstructionType(Instruction::Code opcode) {
-#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
DCHECK(IsInstructionInvoke(opcode));
-#endif
return static_cast<DexInvokeType>(IsInvokeInstructionRange(opcode)
? (opcode - Instruction::INVOKE_VIRTUAL_RANGE)
: (opcode - Instruction::INVOKE_VIRTUAL));
}
constexpr DexMemAccessType IGetMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
DCHECK(IsInstructionIGet(code));
-#endif
return static_cast<DexMemAccessType>(code - Instruction::IGET);
}
constexpr DexMemAccessType IPutMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
DCHECK(IsInstructionIPut(code));
-#endif
return static_cast<DexMemAccessType>(code - Instruction::IPUT);
}
constexpr DexMemAccessType SGetMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
DCHECK(IsInstructionSGet(code));
-#endif
return static_cast<DexMemAccessType>(code - Instruction::SGET);
}
constexpr DexMemAccessType SPutMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
DCHECK(IsInstructionSPut(code));
-#endif
return static_cast<DexMemAccessType>(code - Instruction::SPUT);
}
constexpr DexMemAccessType AGetMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
DCHECK(IsInstructionAGet(code));
-#endif
return static_cast<DexMemAccessType>(code - Instruction::AGET);
}
constexpr DexMemAccessType APutMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
DCHECK(IsInstructionAPut(code));
-#endif
return static_cast<DexMemAccessType>(code - Instruction::APUT);
}
constexpr DexMemAccessType IGetOrIPutMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
DCHECK(IsInstructionIGetOrIPut(code));
-#endif
return (code >= Instruction::IPUT) ? IPutMemAccessType(code) : IGetMemAccessType(code);
}
-static inline DexMemAccessType IGetQuickOrIPutQuickMemAccessType(Instruction::Code code) {
+inline DexMemAccessType IGetQuickOrIPutQuickMemAccessType(Instruction::Code code) {
DCHECK(IsInstructionIGetQuickOrIPutQuick(code));
switch (code) {
case Instruction::IGET_QUICK: case Instruction::IPUT_QUICK:
@@ -225,16 +205,12 @@
}
constexpr DexMemAccessType SGetOrSPutMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
DCHECK(IsInstructionSGetOrSPut(code));
-#endif
return (code >= Instruction::SPUT) ? SPutMemAccessType(code) : SGetMemAccessType(code);
}
constexpr DexMemAccessType AGetOrAPutMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
DCHECK(IsInstructionAGetOrAPut(code));
-#endif
return (code >= Instruction::APUT) ? APutMemAccessType(code) : AGetMemAccessType(code);
}
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 08fec91..d03a9d8 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -44,7 +44,7 @@
const InlineInfo& inline_info,
const InlineInfoEncoding& encoding,
uint8_t inlining_depth)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// This method is being used by artQuickResolutionTrampoline, before it sets up
// the passed parameters in a GC friendly way. Therefore we must never be
// suspended while executing it.
@@ -121,7 +121,7 @@
}
inline ArtMethod* GetCalleeSaveMethodCaller(Thread* self, Runtime::CalleeSaveType type)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return GetCalleeSaveMethodCaller(
self->GetManagedStack()->GetTopQuickFrame(), type, true /* do_caller_check */);
}
@@ -457,7 +457,7 @@
// Explicit template declarations of FindFieldFromCode for all field access types.
#define EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \
-template SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE \
+template REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE \
ArtField* FindFieldFromCode<_type, _access_check>(uint32_t field_idx, \
ArtMethod* referrer, \
Thread* self, size_t expected_size) \
@@ -640,7 +640,7 @@
// Explicit template declarations of FindMethodFromCode for all invoke types.
#define EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \
- template SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE \
+ template REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE \
ArtMethod* FindMethodFromCode<_type, _access_check>(uint32_t method_idx, \
mirror::Object** this_object, \
ArtMethod* referrer, \
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index fd1c02f..4056ec5 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -43,7 +43,7 @@
ArtMethod* referrer,
Thread* self,
bool access_check)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(component_count < 0)) {
ThrowNegativeArraySizeException(component_count);
return nullptr; // Failure
@@ -259,7 +259,7 @@
ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
Runtime::CalleeSaveType type,
bool do_caller_check)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(type));
const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type);
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index a28376f..f88e81d 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -47,12 +47,12 @@
ALWAYS_INLINE inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
ArtMethod* method,
Thread* self, bool* slow_path)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
Thread* self,
bool* slow_path)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it
// cannot be resolved, throw an error. If it can, use it to create an instance.
@@ -63,21 +63,21 @@
ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Given the context of a calling Method and a resolved class, create an instance.
template <bool kInstrumented>
ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
Thread* self,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Given the context of a calling Method and an initialized class, create an instance.
template <bool kInstrumented>
ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
Thread* self,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <bool kAccessCheck>
@@ -85,7 +85,7 @@
int32_t component_count,
ArtMethod* method,
bool* slow_path)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Given the context of a calling Method, use its DexCache to resolve a type to an array Class. If
// it cannot be resolved, throw an error. If it can, use it to create an array.
@@ -97,7 +97,7 @@
ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
@@ -105,13 +105,13 @@
ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, int32_t component_count,
ArtMethod* method, Thread* self,
bool access_check,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
extern mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx,
int32_t component_count,
@@ -119,7 +119,7 @@
Thread* self,
bool access_check,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Type of find field operation for fast and slow case.
enum FindFieldType {
@@ -136,45 +136,45 @@
template<FindFieldType type, bool access_check>
inline ArtField* FindFieldFromCode(
uint32_t field_idx, ArtMethod* referrer, Thread* self, size_t expected_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<InvokeType type, bool access_check>
inline ArtMethod* FindMethodFromCode(
uint32_t method_idx, mirror::Object** this_object, ArtMethod* referrer, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Fast path field resolution that can't initialize classes or throw exceptions.
inline ArtField* FindFieldFast(
uint32_t field_idx, ArtMethod* referrer, FindFieldType type, size_t expected_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Fast path method resolution that can't throw exceptions.
inline ArtMethod* FindMethodFast(
uint32_t method_idx, mirror::Object* this_object, ArtMethod* referrer, bool access_check,
InvokeType type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
inline mirror::Class* ResolveVerifyAndClinit(
uint32_t type_idx, ArtMethod* referrer, Thread* self, bool can_run_clinit, bool verify_access)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
inline mirror::String* ResolveStringFromCode(ArtMethod* referrer, uint32_t string_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// TODO: annotalysis disabled as monitor semantics are maintained in Java code.
inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self)
NO_THREAD_SAFETY_ANALYSIS;
void CheckReferenceResult(mirror::Object* o, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, const char* shorty,
jobject rcvr_jobj, jobject interface_art_method_jobj,
std::vector<jvalue>& args)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool FillArrayData(mirror::Object* obj, const Instruction::ArrayDataPayload* payload)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <typename INT_TYPE, typename FLOAT_TYPE>
inline INT_TYPE art_float_to_integral(FLOAT_TYPE f);
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index a81a7e7..df37f95 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -40,32 +40,32 @@
explicit ScopedQuickEntrypointChecks(Thread *self,
bool entry_check = kIsDebugBuild,
bool exit_check = kIsDebugBuild)
- SHARED_REQUIRES(Locks::mutator_lock_) : self_(self), exit_check_(exit_check) {
+ REQUIRES_SHARED(Locks::mutator_lock_) : self_(self), exit_check_(exit_check) {
if (entry_check) {
TestsOnEntry();
}
}
- ScopedQuickEntrypointChecks() SHARED_REQUIRES(Locks::mutator_lock_)
+ ScopedQuickEntrypointChecks() REQUIRES_SHARED(Locks::mutator_lock_)
: self_(kIsDebugBuild ? Thread::Current() : nullptr), exit_check_(kIsDebugBuild) {
if (kIsDebugBuild) {
TestsOnEntry();
}
}
- ~ScopedQuickEntrypointChecks() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ~ScopedQuickEntrypointChecks() REQUIRES_SHARED(Locks::mutator_lock_) {
if (exit_check_) {
TestsOnExit();
}
}
private:
- void TestsOnEntry() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void TestsOnEntry() REQUIRES_SHARED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(self_);
self_->VerifyStack();
}
- void TestsOnExit() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void TestsOnExit() REQUIRES_SHARED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(self_);
self_->VerifyStack();
}
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index 4686a51..dc5fd07 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -31,7 +31,7 @@
#define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, suffix2, instrumented_bool, allocator_type) \
extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
uint32_t type_idx, ArtMethod* method, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \
mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx, kRuntimePointerSize); \
@@ -58,7 +58,7 @@
} \
extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
mirror::Class* klass, ArtMethod* method ATTRIBUTE_UNUSED, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \
if (LIKELY(klass->IsInitialized())) { \
@@ -84,7 +84,7 @@
} \
extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
mirror::Class* klass, ArtMethod* method ATTRIBUTE_UNUSED, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \
size_t byte_count = klass->GetObjectSize(); \
@@ -108,34 +108,34 @@
} \
extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck##suffix##suffix2( \
uint32_t type_idx, ArtMethod* method, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
return AllocObjectFromCode<true, instrumented_bool>(type_idx, method, self, allocator_type); \
} \
extern "C" mirror::Array* artAllocArrayFromCode##suffix##suffix2( \
uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
return AllocArrayFromCode<false, instrumented_bool>(type_idx, component_count, method, self, \
allocator_type); \
} \
extern "C" mirror::Array* artAllocArrayFromCodeResolved##suffix##suffix2( \
mirror::Class* klass, int32_t component_count, ArtMethod* method, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
return AllocArrayFromCodeResolved<false, instrumented_bool>(klass, component_count, method, self, \
allocator_type); \
} \
extern "C" mirror::Array* artAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \
uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
return AllocArrayFromCode<true, instrumented_bool>(type_idx, component_count, method, self, \
allocator_type); \
} \
extern "C" mirror::Array* artCheckAndAllocArrayFromCode##suffix##suffix2( \
uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (!(instrumented_bool)) { \
return CheckAndAllocArrayFromCode(type_idx, component_count, method, self, false, allocator_type); \
@@ -145,7 +145,7 @@
} \
extern "C" mirror::Array* artCheckAndAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \
uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (!(instrumented_bool)) { \
return CheckAndAllocArrayFromCode(type_idx, component_count, method, self, true, allocator_type); \
@@ -156,7 +156,7 @@
extern "C" mirror::String* artAllocStringFromBytesFromCode##suffix##suffix2( \
mirror::ByteArray* byte_array, int32_t high, int32_t offset, int32_t byte_count, \
Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
StackHandleScope<1> hs(self); \
Handle<mirror::ByteArray> handle_array(hs.NewHandle(byte_array)); \
@@ -165,7 +165,7 @@
} \
extern "C" mirror::String* artAllocStringFromCharsFromCode##suffix##suffix2( \
int32_t offset, int32_t char_count, mirror::CharArray* char_array, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
StackHandleScope<1> hs(self); \
Handle<mirror::CharArray> handle_array(hs.NewHandle(char_array)); \
return mirror::String::AllocFromCharArray<instrumented_bool>(self, char_count, handle_array, \
@@ -173,7 +173,7 @@
} \
extern "C" mirror::String* artAllocStringFromStringFromCode##suffix##suffix2( /* NOLINT */ \
mirror::String* string, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
StackHandleScope<1> hs(self); \
Handle<mirror::String> handle_string(hs.NewHandle(string)); \
return mirror::String::AllocFromString<instrumented_bool>(self, handle_string->GetLength(), \
diff --git a/runtime/entrypoints/quick/quick_cast_entrypoints.cc b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
index 8db69a3..2732d68 100644
--- a/runtime/entrypoints/quick/quick_cast_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
@@ -21,7 +21,7 @@
// Assignable test for code, won't throw. Null and equality tests already performed
extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(klass != nullptr);
DCHECK(ref_class != nullptr);
return klass->IsAssignableFrom(ref_class) ? 1 : 0;
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index f35c2fe..5b9d03b 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -23,14 +23,13 @@
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "quick_exception_handler.h"
-#include "stack.h"
#include "thread.h"
#include "verifier/method_verifier.h"
namespace art {
NO_RETURN static void artDeoptimizeImpl(Thread* self, bool single_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (VLOG_IS_ON(deopt)) {
if (single_frame) {
// Deopt logging will be in DeoptimizeSingleFrame. It is there to take advantage of the
@@ -60,14 +59,14 @@
}
}
-extern "C" NO_RETURN void artDeoptimize(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
+extern "C" NO_RETURN void artDeoptimize(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
artDeoptimizeImpl(self, false);
}
// This is called directly from compiled code by an HDeoptimize.
extern "C" NO_RETURN void artDeoptimizeFromCompiledCode(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
// Before deoptimizing to interpreter, we must push the deoptimization context.
JValue return_value;
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index c045e84..2cd0331 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -26,7 +26,7 @@
namespace art {
extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Called to ensure static storage base is initialized for direct static field reads and writes.
// A class may be accessing another class' fields when it doesn't have access, as access has been
// given by inheritance.
@@ -36,7 +36,7 @@
}
extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Called when method->dex_cache_resolved_types_[] misses.
ScopedQuickEntrypointChecks sqec(self);
auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kSaveRefsOnly);
@@ -44,7 +44,7 @@
}
extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Called when caller isn't guaranteed to have access to a type and the dex cache may be
// unpopulated.
ScopedQuickEntrypointChecks sqec(self);
@@ -53,7 +53,7 @@
}
extern "C" mirror::String* artResolveStringFromCode(int32_t string_idx, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kSaveRefsOnly);
return ResolveStringFromCode(caller, string_idx);
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index 08e0d6e..89712a3 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -86,7 +86,7 @@
// barrier fast path implementations generated by the compiler to mark
// an object that is referenced by a field of a gray object.
extern "C" mirror::Object* artReadBarrierMark(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) HOT_ATTR;
// Read barrier entrypoint for heap references.
// This is the read barrier slow path for instance and static fields
@@ -94,11 +94,11 @@
extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref,
mirror::Object* obj,
uint32_t offset)
- SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) HOT_ATTR;
// Read barrier entrypoint for GC roots.
extern "C" mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root)
- SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) HOT_ATTR;
} // namespace art
diff --git a/runtime/entrypoints/quick/quick_entrypoints_enum.cc b/runtime/entrypoints/quick/quick_entrypoints_enum.cc
new file mode 100644
index 0000000..7b80af6
--- /dev/null
+++ b/runtime/entrypoints/quick/quick_entrypoints_enum.cc
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "quick_entrypoints_enum.h"
+
+namespace art {
+
+bool EntrypointRequiresStackMap(QuickEntrypointEnum trampoline) {
+ // Entrypoints that do not require a stackmap. In general leaf methods
+ // outside of the VM that are not safepoints.
+ switch (trampoline) {
+ // Listed in the same order as in quick_entrypoints_list.h.
+ case kQuickCmpgDouble:
+ case kQuickCmpgFloat:
+ case kQuickCmplDouble:
+ case kQuickCmplFloat:
+ case kQuickCos:
+ case kQuickSin:
+ case kQuickAcos:
+ case kQuickAsin:
+ case kQuickAtan:
+ case kQuickAtan2:
+ case kQuickCbrt:
+ case kQuickCosh:
+ case kQuickExp:
+ case kQuickExpm1:
+ case kQuickHypot:
+ case kQuickLog:
+ case kQuickLog10:
+ case kQuickNextAfter:
+ case kQuickSinh:
+ case kQuickTan:
+ case kQuickTanh:
+ case kQuickFmod:
+ case kQuickL2d:
+ case kQuickFmodf:
+ case kQuickL2f:
+ case kQuickD2iz:
+ case kQuickF2iz:
+ case kQuickIdivmod:
+ case kQuickD2l:
+ case kQuickF2l:
+ case kQuickLdiv:
+ case kQuickLmod:
+ case kQuickLmul:
+ case kQuickShlLong:
+ case kQuickShrLong:
+ case kQuickUshrLong:
+ return false;
+
+ /* Used by mips for 64bit volatile load/stores. */
+ case kQuickA64Load:
+ case kQuickA64Store:
+ return false;
+
+ default:
+ return true;
+ }
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/quick/quick_entrypoints_enum.h b/runtime/entrypoints/quick/quick_entrypoints_enum.h
index 8de1137..7674873 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_enum.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_enum.h
@@ -62,6 +62,8 @@
#undef QUICK_ENTRYPOINT_LIST
#undef ENTRYPOINT_ENUM
+bool EntrypointRequiresStackMap(QuickEntrypointEnum trampoline);
+
} // namespace art
#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_ENUM_H_
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index 1a12bd4..5b65029 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -44,7 +44,7 @@
size_t size,
mirror::Object** obj)
REQUIRES(!Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
HandleWrapper<mirror::Object> h(hs.NewHandleWrapper(obj));
ArtField* field = FindFieldFromCode<type, kAccessCheck>(field_idx, referrer, self, size);
@@ -56,7 +56,7 @@
}
extern "C" ssize_t artGetByteStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int8_t));
if (LIKELY(field != nullptr)) {
@@ -70,7 +70,7 @@
}
extern "C" size_t artGetBooleanStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int8_t));
if (LIKELY(field != nullptr)) {
@@ -84,7 +84,7 @@
}
extern "C" ssize_t artGetShortStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int16_t));
if (LIKELY(field != nullptr)) {
@@ -98,7 +98,7 @@
}
extern "C" size_t artGetCharStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int16_t));
if (LIKELY(field != nullptr)) {
@@ -112,7 +112,7 @@
}
extern "C" size_t artGet32StaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int32_t));
if (LIKELY(field != nullptr)) {
@@ -128,7 +128,7 @@
extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int64_t));
if (LIKELY(field != nullptr)) {
@@ -144,7 +144,7 @@
extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx,
referrer,
@@ -167,7 +167,7 @@
mirror::Object* obj,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -188,7 +188,7 @@
mirror::Object* obj,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -208,7 +208,7 @@
mirror::Object* obj,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -229,7 +229,7 @@
mirror::Object* obj,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -250,7 +250,7 @@
mirror::Object* obj,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int32_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -271,7 +271,7 @@
mirror::Object* obj,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int64_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -292,7 +292,7 @@
mirror::Object* obj,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx,
referrer,
@@ -316,7 +316,7 @@
uint32_t new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int8_t));
if (LIKELY(field != nullptr)) {
@@ -349,7 +349,7 @@
uint16_t new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int16_t));
if (LIKELY(field != nullptr)) {
@@ -382,7 +382,7 @@
uint32_t new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int32_t));
if (LIKELY(field != nullptr)) {
@@ -403,7 +403,7 @@
ArtMethod* referrer,
uint64_t new_value,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t));
if (LIKELY(field != nullptr)) {
@@ -424,7 +424,7 @@
mirror::Object* new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx,
referrer,
@@ -459,7 +459,7 @@
uint8_t new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int8_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -496,7 +496,7 @@
uint16_t new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int16_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -534,7 +534,7 @@
uint32_t new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int32_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -560,7 +560,7 @@
uint64_t new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int64_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -586,7 +586,7 @@
mirror::Object* new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx,
referrer,
diff --git a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
index 22b2fa3..f63c9c2 100644
--- a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
@@ -26,7 +26,7 @@
*/
extern "C" int artHandleFillArrayDataFromCode(uint32_t payload_offset, mirror::Array* array,
ArtMethod* method, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
const uint16_t* const insns = method->GetCodeItem()->insns_;
const Instruction::ArrayDataPayload* payload =
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index 82d5467..fec7373 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -29,7 +29,7 @@
mirror::Object* this_object,
Thread* self,
uintptr_t lr)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Instrumentation changes the stack. Thus, when exiting, the stack cannot be verified, so skip
// that part.
ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false);
@@ -51,7 +51,7 @@
extern "C" TwoWordReturn artInstrumentationMethodExitFromCode(Thread* self, ArtMethod** sp,
uint64_t gpr_result,
uint64_t fpr_result)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Instrumentation exit stub must not be entered with a pending exception.
CHECK(!self->IsExceptionPending()) << "Enter instrumentation exit stub with pending exception "
<< self->GetException()->Dump();
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index c06824c..64f19af 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -78,7 +78,7 @@
}
static void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JNIEnvExt* env = self->GetJniEnv();
if (UNLIKELY(env->check_jni)) {
env->CheckNoHeldMonitors();
diff --git a/runtime/entrypoints/quick/quick_lock_entrypoints.cc b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
index 4adb39b..b4f945a 100644
--- a/runtime/entrypoints/quick/quick_lock_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
@@ -23,7 +23,7 @@
extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self)
NO_THREAD_SAFETY_ANALYSIS
REQUIRES(!Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_) /* EXCLUSIVE_LOCK_FUNCTION(Monitor::monitor_lock_) */ {
+ REQUIRES_SHARED(Locks::mutator_lock_) /* EXCLUSIVE_LOCK_FUNCTION(Monitor::monitor_lock_) */ {
ScopedQuickEntrypointChecks sqec(self);
if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerException("Null reference used for synchronization (monitor-enter)");
@@ -44,7 +44,7 @@
extern "C" int artUnlockObjectFromCode(mirror::Object* obj, Thread* self)
NO_THREAD_SAFETY_ANALYSIS
REQUIRES(!Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_) /* UNLOCK_FUNCTION(Monitor::monitor_lock_) */ {
+ REQUIRES_SHARED(Locks::mutator_lock_) /* UNLOCK_FUNCTION(Monitor::monitor_lock_) */ {
ScopedQuickEntrypointChecks sqec(self);
if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerException("Null reference used for synchronization (monitor-exit)");
diff --git a/runtime/entrypoints/quick/quick_thread_entrypoints.cc b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
index 47b3eff..0838059 100644
--- a/runtime/entrypoints/quick/quick_thread_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
@@ -19,7 +19,7 @@
namespace art {
-extern "C" void artTestSuspendFromCode(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
+extern "C" void artTestSuspendFromCode(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
// Called when suspend count check value is 0 and thread->suspend_count_ != 0
ScopedQuickEntrypointChecks sqec(self);
self->CheckSuspend();
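Context for the mechanical rename running through these runtime/entrypoints hunks: SHARED_REQUIRES and REQUIRES_SHARED are wrappers over Clang's thread-safety analysis attributes, and the new spelling matches the macro naming used in Clang's own thread-safety documentation (REQUIRES / REQUIRES_SHARED). A minimal sketch of how such wrappers are commonly defined -- illustrative only, assumed definitions, not the actual ART macros:

// thread_safety_sketch.h -- illustrative only; ART's real macro definitions live elsewhere.
#if defined(__clang__) && !defined(SWIG)
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE__(x)  // No-op for compilers without the analysis.
#endif

// Caller must hold the capability exclusively (e.g. a writer lock).
#define REQUIRES(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))

// Caller must hold the capability at least shared (e.g. a reader lock).
// This is the spelling the diff migrates to from SHARED_REQUIRES; in ART it is
// applied with Locks::mutator_lock_ as the capability, as in the hunks above.
#define REQUIRES_SHARED(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))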
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index ea9f7b0..67cae8a 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -24,14 +24,14 @@
// Deliver an exception that's pending on thread helping set up a callee save frame on the way.
extern "C" NO_RETURN void artDeliverPendingExceptionFromCode(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
self->QuickDeliverException();
}
// Called by generated code to throw an exception.
extern "C" NO_RETURN void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
/*
* exception may be null, in which case this routine should
* throw NPE. NOTE: this is a convenience for generated code,
@@ -50,7 +50,7 @@
// Called by generated code to throw a NPE exception.
extern "C" NO_RETURN void artThrowNullPointerExceptionFromCode(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
// We come from an explicit check in the generated code. This path is triggered
// only if the object is indeed null.
@@ -60,7 +60,7 @@
// Installed by a signal handler to throw a NPE exception.
extern "C" NO_RETURN void artThrowNullPointerExceptionFromSignal(uintptr_t addr, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
self->NoteSignalBeingHandled();
ThrowNullPointerExceptionFromDexPC(/* check_address */ true, addr);
@@ -70,7 +70,7 @@
// Called by generated code to throw an arithmetic divide by zero exception.
extern "C" NO_RETURN void artThrowDivZeroFromCode(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowArithmeticExceptionDivideByZero();
self->QuickDeliverException();
@@ -78,7 +78,7 @@
// Called by generated code to throw an array index out of bounds exception.
extern "C" NO_RETURN void artThrowArrayBoundsFromCode(int index, int length, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowArrayIndexOutOfBoundsException(index, length);
self->QuickDeliverException();
@@ -86,14 +86,14 @@
// Called by generated code to throw a string index out of bounds exception.
extern "C" NO_RETURN void artThrowStringBoundsFromCode(int index, int length, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowStringIndexOutOfBoundsException(index, length);
self->QuickDeliverException();
}
extern "C" NO_RETURN void artThrowStackOverflowFromCode(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
self->NoteSignalBeingHandled();
ThrowStackOverflowError(self);
@@ -102,7 +102,7 @@
}
extern "C" NO_RETURN void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowNoSuchMethodError(method_idx);
self->QuickDeliverException();
@@ -111,7 +111,7 @@
extern "C" NO_RETURN void artThrowClassCastException(mirror::Class* dest_type,
mirror::Class* src_type,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
DCHECK(!dest_type->IsAssignableFrom(src_type));
ThrowClassCastException(dest_type, src_type);
@@ -120,7 +120,7 @@
extern "C" NO_RETURN void artThrowArrayStoreException(mirror::Object* array, mirror::Object* value,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowArrayStoreException(value->GetClass(), array->GetClass());
self->QuickDeliverException();
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index c67379a..3043c83 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -295,7 +295,7 @@
// kRefAndArgs runtime method. Since 'this' is a reference, it is located in the
// 1st GPR.
static mirror::Object* GetProxyThisObject(ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK((*sp)->IsProxyMethod());
CHECK_GT(kNumQuickGprArgs, 0u);
constexpr uint32_t kThisGprIndex = 0u; // 'this' is in the 1st GPR.
@@ -305,19 +305,19 @@
return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address)->AsMirrorPtr();
}
- static ArtMethod* GetCallingMethod(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static ArtMethod* GetCallingMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
return GetCalleeSaveMethodCaller(sp, Runtime::kSaveRefsAndArgs);
}
- static ArtMethod* GetOuterMethod(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static ArtMethod* GetOuterMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
uint8_t* previous_sp =
reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
return *reinterpret_cast<ArtMethod**>(previous_sp);
}
- static uint32_t GetCallingDexPc(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static uint32_t GetCallingDexPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kSaveRefsAndArgs);
ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
@@ -344,14 +344,14 @@
}
// For the given quick ref and args quick frame, return the caller's PC.
- static uintptr_t GetCallingPc(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static uintptr_t GetCallingPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
return *reinterpret_cast<uintptr_t*>(lr);
}
QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
- uint32_t shorty_len) SHARED_REQUIRES(Locks::mutator_lock_) :
+ uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) :
is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
@@ -436,7 +436,7 @@
}
}
- void VisitArguments() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void VisitArguments() REQUIRES_SHARED(Locks::mutator_lock_) {
// (a) 'stack_args_' should point to the first method's argument
// (b) whatever the argument type it is, the 'stack_index_' should
// be moved forward along with every visiting.
@@ -589,7 +589,7 @@
// Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It
// allows to use the QuickArgumentVisitor constants without moving all the code in its own module.
extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return QuickArgumentVisitor::GetProxyThisObject(sp);
}
@@ -600,7 +600,7 @@
uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
- void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
private:
ShadowFrame* const sf_;
@@ -643,7 +643,7 @@
}
extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Ensure we don't get thread suspension until the object arguments are safely in the shadow
// frame.
ScopedQuickEntrypointChecks sqec(self);
@@ -698,10 +698,10 @@
if (kIsDebugBuild) {
class DummyStackVisitor : public StackVisitor {
public:
- explicit DummyStackVisitor(Thread* self_in) SHARED_REQUIRES(Locks::mutator_lock_)
+ explicit DummyStackVisitor(Thread* self_in) REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(self_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
// Nothing to do here. In a debug build, SanityCheckFrame will do the work in the walking
// logic. Just always say we want to continue.
return true;
@@ -782,9 +782,9 @@
ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
- void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
- void FixupReferences() SHARED_REQUIRES(Locks::mutator_lock_);
+ void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_);
private:
ScopedObjectAccessUnchecked* const soa_;
@@ -843,7 +843,7 @@
// field within the proxy object, which will box the primitive arguments and deal with error cases.
extern "C" uint64_t artQuickProxyInvokeHandler(
ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
// Ensure we don't get thread suspension until the object arguments are safely in jobjects.
@@ -899,9 +899,9 @@
uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
- void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
- void FixupReferences() SHARED_REQUIRES(Locks::mutator_lock_);
+ void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_);
private:
ScopedObjectAccessUnchecked* const soa_;
@@ -932,7 +932,7 @@
// Lazily resolve a method for quick. Called by stub code.
extern "C" const void* artQuickResolutionTrampoline(
ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// The resolution trampoline stashes the resolved method into the callee-save frame to transport
// it. Thus, when exiting, the stack cannot be verified (as the resolved method most likely
// does not have the same stack layout as the callee-save method).
@@ -1309,7 +1309,7 @@
return gpr_index_ > 0;
}
- void AdvanceHandleScope(mirror::Object* ptr) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void AdvanceHandleScope(mirror::Object* ptr) REQUIRES_SHARED(Locks::mutator_lock_) {
uintptr_t handle = PushHandle(ptr);
if (HaveHandleScopeGpr()) {
gpr_index_--;
@@ -1497,7 +1497,7 @@
void PushStack(uintptr_t val) {
delegate_->PushStack(val);
}
- uintptr_t PushHandle(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_) {
+ uintptr_t PushHandle(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
return delegate_->PushHandle(ref);
}
@@ -1557,10 +1557,10 @@
virtual void WalkHeader(
BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
}
- void Walk(const char* shorty, uint32_t shorty_len) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Walk(const char* shorty, uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) {
BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);
WalkHeader(&sm);
@@ -1632,7 +1632,7 @@
//
// Note: assumes ComputeAll() has been run before.
void LayoutCalleeSaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = **m;
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
@@ -1673,7 +1673,7 @@
// Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie.
// Returns the new bottom. Note: this may be unaligned.
uint8_t* LayoutJNISaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// First, fix up the layout of the callee-save frame.
// We have to squeeze in the HandleScope, and relocate the method pointer.
LayoutCalleeSaveFrame(self, m, sp, handle_scope);
@@ -1691,7 +1691,7 @@
uint8_t* ComputeLayout(Thread* self, ArtMethod*** m, const char* shorty, uint32_t shorty_len,
HandleScope** handle_scope, uintptr_t** start_stack, uintptr_t** start_gpr,
uint32_t** start_fpr)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Walk(shorty, shorty_len);
// JNI part.
@@ -1707,7 +1707,7 @@
// Add JNIEnv* and jobj/jclass before the shorty-derived elements.
void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
uint32_t num_handle_scope_references_;
@@ -1763,7 +1763,7 @@
cur_stack_arg_++;
}
- virtual uintptr_t PushHandle(mirror::Object*) SHARED_REQUIRES(Locks::mutator_lock_) {
+ virtual uintptr_t PushHandle(mirror::Object*) REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
UNREACHABLE();
}
@@ -1801,15 +1801,15 @@
}
}
- void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
- void FinalizeHandleScope(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
+ void FinalizeHandleScope(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
StackReference<mirror::Object>* GetFirstHandleScopeEntry() {
return handle_scope_->GetHandle(0).GetReference();
}
- jobject GetFirstHandleScopeJObject() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ jobject GetFirstHandleScopeJObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
return handle_scope_->GetHandle(0).ToJObject();
}
@@ -1825,7 +1825,7 @@
HandleScope* handle_scope) : FillNativeCall(gpr_regs, fpr_regs, stack_args),
handle_scope_(handle_scope), cur_entry_(0) {}
- uintptr_t PushHandle(mirror::Object* ref) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ uintptr_t PushHandle(mirror::Object* ref) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) {
FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
@@ -1833,7 +1833,7 @@
cur_entry_ = 0U;
}
- void ResetRemainingScopeSlots() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void ResetRemainingScopeSlots() REQUIRES_SHARED(Locks::mutator_lock_) {
// Initialize padding entries.
size_t expected_slots = handle_scope_->NumberOfReferences();
while (cur_entry_ < expected_slots) {
@@ -1953,7 +1953,7 @@
* 2) An error, if the value is negative.
*/
extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* called = *sp;
DCHECK(called->IsNative()) << PrettyMethod(called, true);
uint32_t shorty_len = 0;
@@ -2048,7 +2048,7 @@
// for the method pointer.
//
// It is valid to use this, as at the usage points here (returns from C functions) we are assuming
-// to hold the mutator lock (see SHARED_REQUIRES(Locks::mutator_lock_) annotations).
+// to hold the mutator lock (see REQUIRES_SHARED(Locks::mutator_lock_) annotations).
template<InvokeType type, bool access_check>
static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object, Thread* self,
@@ -2090,7 +2090,7 @@
// Explicit artInvokeCommon template function declarations to please analysis tool.
#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
+ template REQUIRES_SHARED(Locks::mutator_lock_) \
TwoWordReturn artInvokeCommon<type, access_check>( \
uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
@@ -2109,31 +2109,31 @@
// See comments in runtime_support_asm.S
extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return artInvokeCommon<kInterface, true>(method_idx, this_object, self, sp);
}
extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return artInvokeCommon<kDirect, true>(method_idx, this_object, self, sp);
}
extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return artInvokeCommon<kStatic, true>(method_idx, this_object, self, sp);
}
extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return artInvokeCommon<kSuper, true>(method_idx, this_object, self, sp);
}
extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp);
}
@@ -2144,7 +2144,7 @@
mirror::Object* this_object,
Thread* self,
ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
StackHandleScope<1> hs(self);
Handle<mirror::Class> cls(hs.NewHandle(this_object->GetClass()));
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 004cdc4..b102334 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -37,7 +37,7 @@
// name.
#define EXPECT_OFFSET_DIFF(first_type, first_field, second_type, second_field, diff, name) \
CHECKED(OFFSETOF_MEMBER(second_type, second_field) \
- - OFFSETOF_MEMBER(first_type, first_field) == diff, name)
+ - OFFSETOF_MEMBER(first_type, first_field) == (diff), name)
// Helper macro for when the fields are from the same type.
#define EXPECT_OFFSET_DIFFNP(type, first_field, second_field, diff) \
@@ -45,15 +45,16 @@
type ## _ ## first_field ## _ ## second_field)
// Helper macro for when the fields are from the same type and in the same member of said type.
+// NOLINT, do not add parentheses around 'prefix'.
#define EXPECT_OFFSET_DIFFP(type, prefix, first_field, second_field, diff) \
- EXPECT_OFFSET_DIFF(type, prefix . first_field, type, prefix . second_field, diff, \
+ EXPECT_OFFSET_DIFF(type, prefix . first_field, type, prefix . second_field, diff, /* NOLINT */ \
type ## _ ## prefix ## _ ## first_field ## _ ## second_field)
// Macro to check whether two fields have at least an expected difference in offsets. The error is
// named name.
#define EXPECT_OFFSET_DIFF_GT(first_type, first_field, second_type, second_field, diff, name) \
CHECKED(OFFSETOF_MEMBER(second_type, second_field) \
- - OFFSETOF_MEMBER(first_type, first_field) >= diff, name)
+ - OFFSETOF_MEMBER(first_type, first_field) >= (diff), name)
// Helper macro for when the fields are from the same type.
#define EXPECT_OFFSET_DIFF_GT3(type, first_field, second_field, diff, name) \
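The entrypoints_order_test.cc change above is separate from the annotation rename: it wraps the 'diff' macro parameter in parentheses so that a compound expression passed as the expected difference is compared as a whole rather than being split by operator precedence. A small stand-alone illustration of the failure mode the parentheses prevent (example code only, not ART's macros):

// macro_paren_example.cc -- illustrative only.
#include <cstdio>

#define CHECK_DIFF_BAD(a, b, diff)  ((b) - (a) == diff)    // unparenthesized parameter
#define CHECK_DIFF_GOOD(a, b, diff) ((b) - (a) == (diff))  // parenthesized parameter

int main() {
  constexpr bool kIs64Bit = sizeof(void*) == 8;
  // Passing a conditional expression as 'diff':
  //   bad:  ((16 - 0 == kIs64Bit) ? 16 : 8)  -> evaluates to 8 or 16, always nonzero,
  //         so the check can never fail regardless of the real offsets.
  //   good: ((16 - 0) == (kIs64Bit ? 16 : 8)) -> actually compares the offset difference.
  std::printf("bad  = %d\n", CHECK_DIFF_BAD(0, 16, kIs64Bit ? 16 : 8));
  std::printf("good = %d\n", CHECK_DIFF_GOOD(0, 16, kIs64Bit ? 16 : 8));
  return 0;
}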
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 45db500..db9568a 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -73,12 +73,12 @@
// Beware: Mixing atomic pushes and atomic pops will cause ABA problem.
// Returns false if we overflowed the stack.
- bool AtomicPushBackIgnoreGrowthLimit(T* value) SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool AtomicPushBackIgnoreGrowthLimit(T* value) REQUIRES_SHARED(Locks::mutator_lock_) {
return AtomicPushBackInternal(value, capacity_);
}
// Returns false if we overflowed the stack.
- bool AtomicPushBack(T* value) SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool AtomicPushBack(T* value) REQUIRES_SHARED(Locks::mutator_lock_) {
return AtomicPushBackInternal(value, growth_limit_);
}
@@ -86,7 +86,7 @@
// slots. Returns false if we overflowed the stack.
bool AtomicBumpBack(size_t num_slots, StackReference<T>** start_address,
StackReference<T>** end_address)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kIsDebugBuild) {
debug_is_sorted_ = false;
}
@@ -112,7 +112,7 @@
return true;
}
- void AssertAllZero() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void AssertAllZero() REQUIRES_SHARED(Locks::mutator_lock_) {
if (kIsDebugBuild) {
for (size_t i = 0; i < capacity_; ++i) {
DCHECK_EQ(begin_[i].AsMirrorPtr(), static_cast<T*>(nullptr)) << "i=" << i;
@@ -120,7 +120,7 @@
}
}
- void PushBack(T* value) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void PushBack(T* value) REQUIRES_SHARED(Locks::mutator_lock_) {
if (kIsDebugBuild) {
debug_is_sorted_ = false;
}
@@ -130,7 +130,7 @@
begin_[index].Assign(value);
}
- T* PopBack() SHARED_REQUIRES(Locks::mutator_lock_) {
+ T* PopBack() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_GT(back_index_.LoadRelaxed(), front_index_.LoadRelaxed());
// Decrement the back index non atomically.
back_index_.StoreRelaxed(back_index_.LoadRelaxed() - 1);
@@ -193,12 +193,12 @@
}
}
- bool ContainsSorted(const T* value) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool ContainsSorted(const T* value) const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(debug_is_sorted_);
return std::binary_search(Begin(), End(), value, ObjectComparator());
}
- bool Contains(const T* value) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool Contains(const T* value) const REQUIRES_SHARED(Locks::mutator_lock_) {
for (auto cur = Begin(), end = End(); cur != end; ++cur) {
if (cur->AsMirrorPtr() == value) {
return true;
@@ -220,7 +220,7 @@
// Returns false if we overflowed the stack.
bool AtomicPushBackInternal(T* value, size_t limit) ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kIsDebugBuild) {
debug_is_sorted_ = false;
}
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index b6af908..969bfb7 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -108,7 +108,7 @@
const Visitor& visitor,
const uint8_t minimum_age = kCardDirty) const
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Assertion used to check the given address is covered by the card table
void CheckAddrIsInCardTable(const uint8_t* addr) const;
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index 0b96979..76247bc 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -35,26 +35,26 @@
class HeapBitmap {
public:
- bool Test(const mirror::Object* obj) SHARED_REQUIRES(Locks::heap_bitmap_lock_);
+ bool Test(const mirror::Object* obj) REQUIRES_SHARED(Locks::heap_bitmap_lock_);
void Clear(const mirror::Object* obj) REQUIRES(Locks::heap_bitmap_lock_);
template<typename LargeObjectSetVisitor>
bool Set(const mirror::Object* obj, const LargeObjectSetVisitor& visitor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_) ALWAYS_INLINE;
template<typename LargeObjectSetVisitor>
bool AtomicTestAndSet(const mirror::Object* obj, const LargeObjectSetVisitor& visitor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_) ALWAYS_INLINE;
ContinuousSpaceBitmap* GetContinuousSpaceBitmap(const mirror::Object* obj) const;
LargeObjectBitmap* GetLargeObjectBitmap(const mirror::Object* obj) const;
void Walk(ObjectCallback* callback, void* arg)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_);
template <typename Visitor>
void Visit(const Visitor& visitor)
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Find and replace a bitmap pointer, this is used by for the bitmap swapping in the GC.
void ReplaceBitmap(ContinuousSpaceBitmap* old_bitmap, ContinuousSpaceBitmap* new_bitmap)
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 35bcb18..24a2c17 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -98,24 +98,24 @@
// Extra parameters are required since we use this same visitor signature for checking objects.
void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
MarkReference(obj->GetFieldObjectReferenceAddr(offset));
}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
VisitRoot(root);
}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
MarkReference(root);
}
private:
template<bool kPoisonReferences>
void MarkReference(mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Only add the reference if it is non null and fits our criteria.
mirror::Object* ref = obj_ptr->AsMirrorPtr();
if (ref != nullptr && !from_space_->HasAddress(ref) && !immune_space_->HasAddress(ref)) {
@@ -150,7 +150,7 @@
void operator()(mirror::Object* root) const
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(root != nullptr);
ModUnionUpdateObjectReferencesVisitor ref_visitor(visitor_,
from_space_,
@@ -193,7 +193,7 @@
// Extra parameters are required since we use this same visitor signature for checking objects.
void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
mirror::Object* ref = ref_ptr->AsMirrorPtr();
// Only add the reference if it is non null and fits our criteria.
@@ -204,14 +204,14 @@
}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (mod_union_table_->ShouldAddReference(root->AsMirrorPtr())) {
*has_target_reference_ = true;
// TODO: Add MarkCompressedReference callback here.
@@ -242,7 +242,7 @@
has_target_reference_(has_target_reference) {}
void operator()(mirror::Object* obj) const
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
// We don't have an early exit since we use the visitor pattern, an early
// exit should significantly speed this up.
AddToReferenceArrayVisitor visitor(mod_union_table_,
@@ -268,7 +268,7 @@
// Extra parameters are required since we use this same visitor signature for checking objects.
void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
if (ref != nullptr &&
mod_union_table_->ShouldAddReference(ref) &&
@@ -289,14 +289,14 @@
}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kIsDebugBuild && !root->IsNull()) {
VisitRoot(root);
}
}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!mod_union_table_->ShouldAddReference(root->AsMirrorPtr()));
}
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index 6aa2417..b6792c4 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -121,17 +121,17 @@
// Update table based on cleared cards and mark all references to the other spaces.
void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_);
virtual void VisitObjects(ObjectCallback* callback, void* arg) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and
// VisitMarkedRange can't know if the callback will modify the bitmap or not.
void Verify() OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_);
// Function that tells whether or not to add a reference to the table.
@@ -139,7 +139,7 @@
virtual bool ContainsCardFor(uintptr_t addr) OVERRIDE;
- virtual void Dump(std::ostream& os) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ virtual void Dump(std::ostream& os) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
virtual void SetCards() OVERRIDE;
@@ -169,11 +169,11 @@
// Mark all references to the alloc space(s).
virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
virtual void VisitObjects(ObjectCallback* callback, void* arg) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Nothing to verify.
virtual void Verify() OVERRIDE {}
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index 349d6ff..2810f58 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -47,7 +47,7 @@
}
mirror::ObjectArray<mirror::Object>* AllocObjectArray(
Thread* self, space::ContinuousMemMapAllocSpace* space, size_t component_count)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
auto* klass = GetObjectArrayClass(self, space);
const size_t size = mirror::ComputeArraySize(component_count, 2);
size_t bytes_allocated = 0, bytes_tl_bulk_allocated;
@@ -68,7 +68,7 @@
private:
mirror::Class* GetObjectArrayClass(Thread* self, space::ContinuousMemMapAllocSpace* space)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (java_lang_object_array_ == nullptr) {
java_lang_object_array_ =
Runtime::Current()->GetClassLinker()->GetClassRoot(ClassLinker::kObjectArrayClass);
@@ -98,12 +98,12 @@
public:
explicit CollectVisitedVisitor(std::set<mirror::Object*>* out) : out_(out) {}
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(ref != nullptr);
MarkObject(ref->AsMirrorPtr());
}
virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
out_->insert(obj);
return obj;
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index eb0852a..7229f76 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -67,7 +67,7 @@
contains_reference_to_target_space_(contains_reference_to_target_space) {}
void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
if (target_space_->HasAddress(ref_ptr->AsMirrorPtr())) {
@@ -78,7 +78,7 @@
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
if (target_space_->HasAddress(ref->GetReferent())) {
*contains_reference_to_target_space_ = true;
collector_->DelayReferenceReferent(klass, ref);
@@ -86,14 +86,14 @@
}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (target_space_->HasAddress(root->AsMirrorPtr())) {
*contains_reference_to_target_space_ = true;
root->Assign(collector_->MarkObject(root->AsMirrorPtr()));
@@ -116,7 +116,7 @@
contains_reference_to_target_space_(contains_reference_to_target_space) {}
void operator()(mirror::Object* obj) const REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RememberedSetReferenceVisitor visitor(target_space_, contains_reference_to_target_space_,
collector_);
obj->VisitReferences(visitor, visitor);
diff --git a/runtime/gc/accounting/remembered_set.h b/runtime/gc/accounting/remembered_set.h
index 3a0dcf7..5594781 100644
--- a/runtime/gc/accounting/remembered_set.h
+++ b/runtime/gc/accounting/remembered_set.h
@@ -57,7 +57,7 @@
void UpdateAndMarkReferences(space::ContinuousSpace* target_space,
collector::GarbageCollector* collector)
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void Dump(std::ostream& os);
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 3df02ed..3649111 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -195,7 +195,7 @@
void SpaceBitmap<kAlignment>::WalkInstanceFields(SpaceBitmap<kAlignment>* visited,
ObjectCallback* callback, mirror::Object* obj,
mirror::Class* klass, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Visit fields of parent classes first.
mirror::Class* super = klass->GetSuperClass();
if (super != nullptr) {
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 829b1b1..576f9c7 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -123,7 +123,7 @@
// Visit the live objects in the range [visit_begin, visit_end).
// TODO: Use lock annotations when clang is fixed.
- // REQUIRES(Locks::heap_bitmap_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ // REQUIRES(Locks::heap_bitmap_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
template <typename Visitor>
void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const
NO_THREAD_SAFETY_ANALYSIS;
@@ -131,12 +131,12 @@
// Visits set bits in address order. The callback is not permitted to change the bitmap bits or
// max during the traversal.
void Walk(ObjectCallback* callback, void* arg)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_);
// Visits set bits with an in order traversal. The callback is not permitted to change the bitmap
// bits or max during the traversal.
void InOrderWalk(ObjectCallback* callback, void* arg)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Walk through the bitmaps in increasing address order, and find the object pointers that
// correspond to garbage objects. Call <callback> zero or more times with lists of these object
@@ -204,12 +204,12 @@
// For an unvisited object, visit it then all its children found via fields.
static void WalkFieldsInOrder(SpaceBitmap* visited, ObjectCallback* callback, mirror::Object* obj,
- void* arg) SHARED_REQUIRES(Locks::mutator_lock_);
+ void* arg) REQUIRES_SHARED(Locks::mutator_lock_);
// Walk instance fields of the given Class. Separate function to allow recursion on the super
// class.
static void WalkInstanceFields(SpaceBitmap<kAlignment>* visited, ObjectCallback* callback,
mirror::Object* obj, mirror::Class* klass, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Backing storage for bitmap.
std::unique_ptr<MemMap> mem_map_;
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 522f236..13ebb27 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -119,7 +119,7 @@
}
static inline void SweepClassObject(AllocRecord* record, IsMarkedVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_) {
GcRoot<mirror::Class>& klass = record->GetClassGcRoot();
// This does not need a read barrier because this is called by GC.
@@ -187,7 +187,7 @@
class AllocRecordStackVisitor : public StackVisitor {
public:
AllocRecordStackVisitor(Thread* thread, size_t max_depth, AllocRecordStackTrace* trace_out)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
max_depth_(max_depth),
trace_(trace_out) {}
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index a2d86cc..f1f013b 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -38,7 +38,7 @@
class AllocRecordStackTraceElement {
public:
- int32_t ComputeLineNumber() const SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t ComputeLineNumber() const REQUIRES_SHARED(Locks::mutator_lock_);
AllocRecordStackTraceElement() = default;
AllocRecordStackTraceElement(ArtMethod* method, uint32_t dex_pc)
@@ -174,14 +174,14 @@
return trace_.GetTid();
}
- mirror::Class* GetClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Class* GetClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
return klass_.Read();
}
const char* GetClassDescriptor(std::string* storage) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- GcRoot<mirror::Class>& GetClassGcRoot() SHARED_REQUIRES(Locks::mutator_lock_) {
+ GcRoot<mirror::Class>& GetClassGcRoot() REQUIRES_SHARED(Locks::mutator_lock_) {
return klass_;
}
@@ -213,7 +213,7 @@
mirror::Object** obj,
size_t byte_count)
REQUIRES(!Locks::alloc_tracker_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void SetAllocTrackingEnabled(bool enabled) REQUIRES(!Locks::alloc_tracker_lock_);
@@ -221,7 +221,7 @@
~AllocRecordObjectMap();
void Put(mirror::Object* obj, AllocRecord&& record)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_) {
if (entries_.size() == alloc_record_max_) {
entries_.pop_front();
@@ -229,22 +229,22 @@
entries_.push_back(EntryPair(GcRoot<mirror::Object>(obj), std::move(record)));
}
- size_t Size() const SHARED_REQUIRES(Locks::alloc_tracker_lock_) {
+ size_t Size() const REQUIRES_SHARED(Locks::alloc_tracker_lock_) {
return entries_.size();
}
- size_t GetRecentAllocationSize() const SHARED_REQUIRES(Locks::alloc_tracker_lock_) {
+ size_t GetRecentAllocationSize() const REQUIRES_SHARED(Locks::alloc_tracker_lock_) {
CHECK_LE(recent_record_max_, alloc_record_max_);
size_t sz = entries_.size();
return std::min(recent_record_max_, sz);
}
void VisitRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_);
void SweepAllocationRecords(IsMarkedVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_);
// Allocation tracking could be enabled by user in between DisallowNewAllocationRecords() and
@@ -254,36 +254,36 @@
// swept from the list. But missing the first few records is acceptable for using the button to
// enable allocation tracking.
void DisallowNewAllocationRecords()
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_);
void AllowNewAllocationRecords()
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_);
void BroadcastForNewAllocationRecords()
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_);
// TODO: Is there a better way to hide the entries_'s type?
EntryList::iterator Begin()
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_) {
return entries_.begin();
}
EntryList::iterator End()
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_) {
return entries_.end();
}
EntryList::reverse_iterator RBegin()
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_) {
return entries_.rbegin();
}
EntryList::reverse_iterator REnd()
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_) {
return entries_.rend();
}
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 375d869..a7f2aa0 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -133,7 +133,7 @@
DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
if (req_byte_size <= fpr_byte_size) {
// Found one.
- free_page_runs_.erase(it++);
+ it = free_page_runs_.erase(it);
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x"
<< std::hex << reinterpret_cast<intptr_t>(fpr)
@@ -141,7 +141,8 @@
}
if (req_byte_size < fpr_byte_size) {
// Split.
- FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
+ FreePageRun* remainder =
+ reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
if (kIsDebugBuild) {
remainder->magic_num_ = kMagicNumFree;
}
@@ -364,86 +365,74 @@
<< std::hex << reinterpret_cast<uintptr_t>(fpr->End(this)) << " [" << std::dec
<< (fpr->End(this) == End() ? page_map_size_ : ToPageMapIndex(fpr->End(this))) << "]";
}
- auto higher_it = free_page_runs_.upper_bound(fpr);
- if (higher_it != free_page_runs_.end()) {
- for (auto it = higher_it; it != free_page_runs_.end(); ) {
- FreePageRun* h = *it;
- DCHECK_EQ(h->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ for (auto it = free_page_runs_.upper_bound(fpr); it != free_page_runs_.end(); ) {
+ FreePageRun* h = *it;
+ DCHECK_EQ(h->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ if (kTraceRosAlloc) {
+ LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a higher free page run 0x"
+ << std::hex << reinterpret_cast<uintptr_t>(h) << " [" << std::dec << ToPageMapIndex(h) << "] -0x"
+ << std::hex << reinterpret_cast<uintptr_t>(h->End(this)) << " [" << std::dec
+ << (h->End(this) == End() ? page_map_size_ : ToPageMapIndex(h->End(this))) << "]";
+ }
+ if (fpr->End(this) == h->Begin()) {
if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a higher free page run 0x"
- << std::hex << reinterpret_cast<uintptr_t>(h) << " [" << std::dec << ToPageMapIndex(h) << "] -0x"
- << std::hex << reinterpret_cast<uintptr_t>(h->End(this)) << " [" << std::dec
- << (h->End(this) == End() ? page_map_size_ : ToPageMapIndex(h->End(this))) << "]";
+ LOG(INFO) << "Success";
}
- if (fpr->End(this) == h->Begin()) {
- if (kTraceRosAlloc) {
- LOG(INFO) << "Success";
- }
- // Clear magic num since this is no longer the start of a free page run.
- if (kIsDebugBuild) {
- h->magic_num_ = 0;
- }
- free_page_runs_.erase(it++);
- if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
- << reinterpret_cast<intptr_t>(h)
- << " from free_page_runs_";
- }
- fpr->SetByteSize(this, fpr->ByteSize(this) + h->ByteSize(this));
- DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
- } else {
- // Not adjacent. Stop.
- if (kTraceRosAlloc) {
- LOG(INFO) << "Fail";
- }
- break;
+ // Clear magic num since this is no longer the start of a free page run.
+ if (kIsDebugBuild) {
+ h->magic_num_ = 0;
}
+ it = free_page_runs_.erase(it);
+ if (kTraceRosAlloc) {
+ LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
+ << reinterpret_cast<intptr_t>(h)
+ << " from free_page_runs_";
+ }
+ fpr->SetByteSize(this, fpr->ByteSize(this) + h->ByteSize(this));
+ DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ } else {
+ // Not adjacent. Stop.
+ if (kTraceRosAlloc) {
+ LOG(INFO) << "Fail";
+ }
+ break;
}
}
// Try to coalesce in the lower address direction.
- auto lower_it = free_page_runs_.upper_bound(fpr);
- if (lower_it != free_page_runs_.begin()) {
- --lower_it;
- for (auto it = lower_it; ; ) {
- // We want to try to coalesce with the first element but
- // there's no "<=" operator for the iterator.
- bool to_exit_loop = it == free_page_runs_.begin();
+ for (auto it = free_page_runs_.upper_bound(fpr); it != free_page_runs_.begin(); ) {
+ --it;
- FreePageRun* l = *it;
- DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ FreePageRun* l = *it;
+ DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ if (kTraceRosAlloc) {
+ LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a lower free page run 0x"
+ << std::hex << reinterpret_cast<uintptr_t>(l) << " [" << std::dec << ToPageMapIndex(l) << "] -0x"
+ << std::hex << reinterpret_cast<uintptr_t>(l->End(this)) << " [" << std::dec
+ << (l->End(this) == End() ? page_map_size_ : ToPageMapIndex(l->End(this))) << "]";
+ }
+ if (l->End(this) == fpr->Begin()) {
if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a lower free page run 0x"
- << std::hex << reinterpret_cast<uintptr_t>(l) << " [" << std::dec << ToPageMapIndex(l) << "] -0x"
- << std::hex << reinterpret_cast<uintptr_t>(l->End(this)) << " [" << std::dec
- << (l->End(this) == End() ? page_map_size_ : ToPageMapIndex(l->End(this))) << "]";
+ LOG(INFO) << "Success";
}
- if (l->End(this) == fpr->Begin()) {
- if (kTraceRosAlloc) {
- LOG(INFO) << "Success";
- }
- free_page_runs_.erase(it--);
- if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
- << reinterpret_cast<intptr_t>(l)
- << " from free_page_runs_";
- }
- l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
- DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
- // Clear magic num since this is no longer the start of a free page run.
- if (kIsDebugBuild) {
- fpr->magic_num_ = 0;
- }
- fpr = l;
- } else {
- // Not adjacent. Stop.
- if (kTraceRosAlloc) {
- LOG(INFO) << "Fail";
- }
- break;
+ it = free_page_runs_.erase(it);
+ if (kTraceRosAlloc) {
+ LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
+ << reinterpret_cast<intptr_t>(l)
+ << " from free_page_runs_";
}
- if (to_exit_loop) {
- break;
+ l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
+ DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ // Clear magic num since this is no longer the start of a free page run.
+ if (kIsDebugBuild) {
+ fpr->magic_num_ = 0;
}
+ fpr = l;
+ } else {
+ // Not adjacent. Stop.
+ if (kTraceRosAlloc) {
+ LOG(INFO) << "Fail";
+ }
+ break;
}
}
}
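The rosalloc hunk above swaps the old erase(it++)/erase(it--) idiom for the iterator returned by std::set::erase, and walks the lower-address direction by decrementing from upper_bound() instead of tracking a to_exit_loop flag. Below is a minimal sketch of that iteration pattern on a plain std::set; the int keys and the "+10" adjacency test are illustrative stand-ins for FreePageRun pointers and byte sizes, not RosAlloc types.

#include <iostream>
#include <set>

int main() {
  std::set<int> runs = {10, 20, 30, 50};
  int key = 20;

  // Higher-address direction: erase() returns the iterator to the element
  // following the erased one, so the loop advances without erase(it++).
  for (auto it = runs.upper_bound(key); it != runs.end();) {
    if (*it == key + 10) {       // stand-in for "h adjacent to fpr->End()"
      key = *it;                 // pretend the run was merged into fpr
      it = runs.erase(it);       // the returned iterator is always valid
    } else {
      break;                     // not adjacent, stop coalescing
    }
  }

  // Lower-address direction: decrementing before each use reaches begin()
  // naturally, so no extra "did we just visit the first element" flag.
  for (auto it = runs.upper_bound(key); it != runs.begin();) {
    --it;
    std::cout << "lower candidate: " << *it << "\n";
    break;                       // the real code stops at the first gap
  }
  return 0;
}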
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 85d307b..975ac36 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -241,7 +241,7 @@
: concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
}
- virtual void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ virtual void Run(Thread* thread) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -271,7 +271,7 @@
void VisitRoots(mirror::Object*** roots,
size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
mirror::Object** root = roots[i];
mirror::Object* ref = *root;
@@ -287,7 +287,7 @@
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
mirror::CompressedReference<mirror::Object>* const root = roots[i];
if (!root->IsNull()) {
@@ -355,14 +355,14 @@
: collector_(collector) {}
void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
- const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
+ const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
CheckReference(obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset),
obj, offset);
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
- SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
+ REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
CheckReference(ref->GetReferent<kWithoutReadBarrier>(),
ref,
@@ -371,7 +371,7 @@
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
@@ -379,7 +379,7 @@
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CheckReference(root->AsMirrorPtr(), nullptr, MemberOffset(0));
}
@@ -387,7 +387,7 @@
ConcurrentCopying* const collector_;
void CheckReference(mirror::Object* ref, mirror::Object* holder, MemberOffset offset) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (ref != nullptr) {
if (!collector_->immune_spaces_.ContainsObject(ref)) {
// Not immune, must be a zygote large object.
@@ -414,7 +414,7 @@
live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
reinterpret_cast<uintptr_t>(space->Limit()),
[&visitor](mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// If an object is not gray, it should only have references to things in the immune spaces.
if (obj->GetReadBarrierPointer() != ReadBarrier::GrayPtr()) {
obj->VisitReferences</*kVisitNativeRoots*/true,
@@ -456,7 +456,7 @@
public:
explicit GrayImmuneObjectVisitor() {}
- ALWAYS_INLINE void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
if (kUseBakerReadBarrier) {
if (kIsDebugBuild) {
Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
@@ -465,7 +465,7 @@
}
}
- static void Callback(mirror::Object* obj, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
reinterpret_cast<GrayImmuneObjectVisitor*>(arg)->operator()(obj);
}
};
@@ -540,7 +540,7 @@
explicit ImmuneSpaceScanObjVisitor(ConcurrentCopying* cc)
: collector_(cc) {}
- ALWAYS_INLINE void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
if (obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
collector_->ScanImmuneObject(obj);
@@ -554,7 +554,7 @@
}
}
- static void Callback(mirror::Object* obj, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
reinterpret_cast<ImmuneSpaceScanObjVisitor*>(arg)->operator()(obj);
}
@@ -922,7 +922,7 @@
: collector_(collector) {}
void operator()(mirror::Object* ref) const
- SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
+ REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
if (ref == nullptr) {
// OK.
return;
@@ -936,7 +936,7 @@
}
void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(root != nullptr);
operator()(root);
}
@@ -951,27 +951,27 @@
: collector_(collector) {}
void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
+ REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref =
obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
VerifyNoFromSpaceRefsVisitor visitor(collector_);
visitor(ref);
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
- SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
+ REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
this->operator()(ref, mirror::Reference::ReferentOffset(), false);
}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
VerifyNoFromSpaceRefsVisitor visitor(collector_);
visitor(root->AsMirrorPtr());
}
@@ -985,11 +985,11 @@
explicit VerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
void operator()(mirror::Object* obj) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectCallback(obj, collector_);
}
static void ObjectCallback(mirror::Object* obj, void *arg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(obj != nullptr);
ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
space::RegionSpace* region_space = collector->RegionSpace();
@@ -1055,7 +1055,7 @@
: collector_(collector) {}
void operator()(mirror::Object* ref) const
- SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
+ REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
if (ref == nullptr) {
// OK.
return;
@@ -1073,26 +1073,26 @@
: collector_(collector) {}
void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
+ REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref =
obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
AssertToSpaceInvariantRefsVisitor visitor(collector_);
visitor(ref);
}
void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
+ REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
AssertToSpaceInvariantRefsVisitor visitor(collector_);
visitor(root->AsMirrorPtr());
}
@@ -1106,11 +1106,11 @@
explicit AssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
void operator()(mirror::Object* obj) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectCallback(obj, collector_);
}
static void ObjectCallback(mirror::Object* obj, void *arg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(obj != nullptr);
ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
space::RegionSpace* region_space = collector->RegionSpace();
@@ -1474,7 +1474,7 @@
reinterpret_cast<uintptr_t>(los->End()),
[mark_bitmap, los, self](mirror::Object* obj)
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (los->IsZygoteLargeObject(self, obj)) {
mark_bitmap->Set(obj);
}
@@ -1600,7 +1600,7 @@
template <class MirrorType>
ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
@@ -1608,13 +1608,13 @@
template <class MirrorType>
void VisitRoot(mirror::Object** root)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root;
}
template <class MirrorType>
void VisitRoot(mirror::CompressedReference<MirrorType>* root)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr();
}
};
@@ -1745,20 +1745,20 @@
: collector_(collector) {}
void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
- const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
+ const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
collector_->Process(obj, offset);
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
- SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
+ REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
collector_->DelayReferenceReferent(klass, ref);
}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
@@ -1766,7 +1766,7 @@
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
collector_->MarkRoot</*kGrayImmuneObject*/false>(root);
}
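The bulk of this file, and of the headers below, is a mechanical rename of SHARED_REQUIRES to REQUIRES_SHARED, matching the spelling used by Clang's -Wthread-safety capability attributes. The sketch below shows how such macros are typically defined and checked; it follows the example header from Clang's thread-safety-analysis documentation rather than ART's own base/ definitions, and ToyMutatorLock is a hypothetical stand-in for Locks::mutator_lock_.

// Build sketch with: clang++ -std=c++14 -Wthread-safety -c annotations_sketch.cc
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#define CAPABILITY(x)        THREAD_ANNOTATION_ATTRIBUTE__(capability(x))
#define REQUIRES(...)        THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))
#define REQUIRES_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))
#define ACQUIRE_SHARED(...)  THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__))
#define RELEASE_SHARED(...)  THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__))

class CAPABILITY("mutex") ToyMutatorLock {
 public:
  void SharedLock()   ACQUIRE_SHARED() {}
  void SharedUnlock() RELEASE_SHARED() {}
};

ToyMutatorLock toy_mutator_lock;

// Shared (reader) access is enough to scan objects concurrently...
void ScanObject() REQUIRES_SHARED(toy_mutator_lock) {}

// ...while an operation that moves objects wants exclusive access.
void FlipThreadRoots() REQUIRES(toy_mutator_lock) {}

void Caller() {
  toy_mutator_lock.SharedLock();
  ScanObject();            // OK: the shared capability is held here.
  // FlipThreadRoots();    // -Wthread-safety would flag this call.
  toy_mutator_lock.SharedUnlock();
}

Only the macro spelling changes in this CL; the underlying attribute, and therefore what -Wthread-safety checks, stays the same.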
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 97f4555..1ef0aea 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -71,16 +71,16 @@
!mark_stack_lock_,
!rb_slow_path_histogram_lock_,
!skipped_blocks_lock_);
- void InitializePhase() SHARED_REQUIRES(Locks::mutator_lock_)
+ void InitializePhase() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !immune_gray_stack_lock_);
- void MarkingPhase() SHARED_REQUIRES(Locks::mutator_lock_)
+ void MarkingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
- void ReclaimPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+ void ReclaimPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
void FinishPhase() REQUIRES(!mark_stack_lock_,
!rb_slow_path_histogram_lock_,
!skipped_blocks_lock_);
- void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_)
+ void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::heap_bitmap_lock_);
virtual GcType GetGcType() const OVERRIDE {
return kGcTypePartial;
@@ -97,19 +97,19 @@
return region_space_;
}
void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
- SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsInToSpace(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsInToSpace(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(ref != nullptr);
return IsMarked(ref) == ref;
}
template<bool kGrayImmuneObject = true, bool kFromGCThread = false>
ALWAYS_INLINE mirror::Object* Mark(mirror::Object* from_ref)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
ALWAYS_INLINE mirror::Object* MarkFromReadBarrier(mirror::Object* from_ref)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
bool IsMarking() const {
return is_marking_;
@@ -123,37 +123,37 @@
bool IsWeakRefAccessEnabled() {
return weak_ref_access_enabled_.LoadRelaxed();
}
- void RevokeThreadLocalMarkStack(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
+ void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
private:
- void PushOntoMarkStack(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_)
+ void PushOntoMarkStack(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
- mirror::Object* Copy(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
+ mirror::Object* Copy(mirror::Object* from_ref) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
- void Scan(mirror::Object* to_ref) SHARED_REQUIRES(Locks::mutator_lock_)
+ void Scan(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
void Process(mirror::Object* obj, MemberOffset offset)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_ , !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
template<bool kGrayImmuneObject>
void MarkRoot(mirror::CompressedReference<mirror::Object>* root)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
accounting::ObjectStack* GetAllocationStack();
accounting::ObjectStack* GetLiveStack();
- virtual void ProcessMarkStack() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
+ virtual void ProcessMarkStack() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
- bool ProcessMarkStackOnce() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
- void ProcessMarkStackRef(mirror::Object* to_ref) SHARED_REQUIRES(Locks::mutator_lock_)
+ bool ProcessMarkStackOnce() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+ void ProcessMarkStackRef(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
void GrayAllDirtyImmuneObjects()
REQUIRES(Locks::mutator_lock_)
@@ -162,75 +162,75 @@
REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access)
- SHARED_REQUIRES(Locks::mutator_lock_);
- void SwitchToSharedMarkStackMode() SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void SwitchToSharedMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
- void SwitchToGcExclusiveMarkStackMode() SHARED_REQUIRES(Locks::mutator_lock_);
+ void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
- void ProcessReferences(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SweepSystemWeaks(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
void Sweep(bool swap_bitmaps)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
void SweepLargeObjects(bool swap_bitmaps)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
void MarkZygoteLargeObjects()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* AllocateInSkippedBlock(size_t alloc_size)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
- void CheckEmptyMarkStack() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
- void IssueEmptyCheckpoint() SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsOnAllocStack(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void CheckEmptyMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+ void IssueEmptyCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsOnAllocStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* GetFwdPtr(mirror::Object* from_ref)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void FlipThreadRoots() REQUIRES(!Locks::mutator_lock_);
- void SwapStacks() SHARED_REQUIRES(Locks::mutator_lock_);
+ void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
void RecordLiveStackFreezeSize(Thread* self);
void ComputeUnevacFromSpaceLiveRatio();
void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref)
- SHARED_REQUIRES(Locks::mutator_lock_);
- void ReenableWeakRefAccess(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
- void DisableMarking() SHARED_REQUIRES(Locks::mutator_lock_);
- void IssueDisableMarkingCheckpoint() SHARED_REQUIRES(Locks::mutator_lock_);
- void ExpandGcMarkStack() SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::Object* MarkNonMoving(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void ReenableWeakRefAccess(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
+ void DisableMarking() REQUIRES_SHARED(Locks::mutator_lock_);
+ void IssueDisableMarkingCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
+ void ExpandGcMarkStack() REQUIRES_SHARED(Locks::mutator_lock_);
+ mirror::Object* MarkNonMoving(mirror::Object* from_ref) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
ALWAYS_INLINE mirror::Object* MarkUnevacFromSpaceRegion(mirror::Object* from_ref,
accounting::SpaceBitmap<kObjectAlignment>* bitmap)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
template<bool kGrayImmuneObject>
ALWAYS_INLINE mirror::Object* MarkImmuneSpace(mirror::Object* from_ref)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!immune_gray_stack_lock_);
- void PushOntoFalseGrayStack(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!immune_gray_stack_lock_);
+ void PushOntoFalseGrayStack(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
- void ProcessFalseGrayStack() SHARED_REQUIRES(Locks::mutator_lock_)
+ void ProcessFalseGrayStack() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
void ScanImmuneObject(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
mirror::Object* MarkFromReadBarrierWithMeasurements(mirror::Object* from_ref)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
void DumpPerformanceInfo(std::ostream& os) OVERRIDE REQUIRES(!rb_slow_path_histogram_lock_);
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index e0b71a7..6afe876 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -155,7 +155,7 @@
// this is the allocation space, for full GC then we swap the zygote bitmaps too.
void SwapBitmaps()
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
uint64_t GetTotalPausedTimeNs() REQUIRES(!pause_histogram_lock_);
int64_t GetTotalFreedBytes() const {
return total_freed_bytes_;
@@ -186,18 +186,18 @@
// Helper functions for querying if objects are marked. These are used for processing references,
// and will be used for reading system weaks while the GC is running.
virtual mirror::Object* IsMarked(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Used by reference processor.
- virtual void ProcessMarkStack() SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ virtual void ProcessMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Force mark an object.
virtual mirror::Object* MarkObject(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
protected:
// Run all of the GC phases.
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 43482eb..d866106 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -136,7 +136,7 @@
} else {
DCHECK(!space_->HasAddress(obj));
auto slow_path = [this](const mirror::Object* ref)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Marking a large object, make sure it's aligned as a sanity check.
if (!IsAligned<kPageSize>(ref)) {
Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
@@ -289,7 +289,7 @@
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
OVERRIDE REQUIRES(Locks::mutator_lock_)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
for (size_t i = 0; i < count; ++i) {
mirror::Object* obj = *roots[i];
mirror::Object* new_obj = collector_->GetMarkedForwardAddress(obj);
@@ -303,7 +303,7 @@
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
OVERRIDE REQUIRES(Locks::mutator_lock_)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
for (size_t i = 0; i < count; ++i) {
mirror::Object* obj = roots[i]->AsMirrorPtr();
mirror::Object* new_obj = collector_->GetMarkedForwardAddress(obj);
@@ -322,7 +322,7 @@
public:
explicit UpdateObjectReferencesVisitor(MarkCompact* collector) : collector_(collector) {}
- void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+ void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
collector_->UpdateObjectReferences(obj);
}
@@ -509,7 +509,7 @@
objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
reinterpret_cast<uintptr_t>(space_->End()),
[this](mirror::Object* obj)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
MoveObject(obj, obj->SizeOf());
});
@@ -558,7 +558,7 @@
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_) {
collector_->DelayReferenceReferent(klass, ref);
}
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 16abfb7..a61646c 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -96,7 +96,7 @@
// Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie
// the image. Mark that portion of the heap as immune.
- void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_)
+ void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::heap_bitmap_lock_);
void UnBindBitmaps()
@@ -112,7 +112,7 @@
void SweepLargeObjects(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_);
void SweepSystemWeaks()
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
OVERRIDE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
@@ -123,33 +123,33 @@
// Schedules an unmarked object for reference processing.
void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
protected:
// Returns null if the object is not marked, otherwise returns the forwarding address (same as
// object for non movable things).
mirror::Object* GetMarkedForwardAddress(mirror::Object* object)
REQUIRES(Locks::mutator_lock_)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_);
// Marks or unmarks a large object based on whether or not set is true. If set is true, then we
// mark, otherwise we unmark.
bool MarkLargeObject(const mirror::Object* obj)
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Expand mark stack to 2x its current size.
- void ResizeMarkStack(size_t new_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ void ResizeMarkStack(size_t new_size) REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if we should sweep the space.
bool ShouldSweepSpace(space::ContinuousSpace* space) const;
// Push an object onto the mark stack.
- void MarkStackPush(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ void MarkStackPush(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
void UpdateAndMarkModUnion()
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Recursively blackens objects on the mark stack.
void ProcessMarkStack()
@@ -173,20 +173,20 @@
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE
- SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_);
virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj) OVERRIDE
- SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_);
void ForwardObject(mirror::Object* obj) REQUIRES(Locks::heap_bitmap_lock_,
Locks::mutator_lock_);
// Update a single heap reference.
void UpdateHeapReference(mirror::HeapReference<mirror::Object>* reference)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_);
// Update all of the references of a single object.
void UpdateObjectReferences(mirror::Object* obj)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_);
// Revoke all the thread-local buffers.
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 3904160..cbc4dc1 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -275,7 +275,7 @@
void operator()(mirror::Object* obj) const
ALWAYS_INLINE
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kCheckLocks) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
@@ -541,7 +541,7 @@
explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { }
void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
CHECK(collector_->IsMarked(root) != nullptr) << info.ToString();
}
@@ -568,7 +568,7 @@
class MarkSweep::VerifyRootVisitor : public SingleRootVisitor {
public:
void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
// See if the root is on any space bitmap.
auto* heap = Runtime::Current()->GetHeap();
if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
@@ -618,7 +618,7 @@
void operator()(mirror::Class* klass, mirror::Reference* ref) const
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
collector_->DelayReferenceReferent(klass, ref);
}
@@ -659,19 +659,19 @@
ALWAYS_INLINE void operator()(mirror::Object* obj,
MemberOffset offset,
bool is_static ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Mark(obj->GetFieldObject<mirror::Object>(offset));
}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kCheckLocks) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
@@ -680,7 +680,7 @@
}
private:
- ALWAYS_INLINE void Mark(mirror::Object* ref) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE void Mark(mirror::Object* ref) const REQUIRES_SHARED(Locks::mutator_lock_) {
if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
if (kUseFinger) {
std::atomic_thread_fence(std::memory_order_seq_cst);
@@ -705,7 +705,7 @@
// No thread safety analysis since multiple threads will use this visitor.
void operator()(mirror::Object* obj) const
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
DelayReferenceReferentVisitor ref_visitor(mark_sweep);
@@ -732,7 +732,7 @@
size_t mark_stack_pos_;
ALWAYS_INLINE void MarkStackPush(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
// Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
mark_stack_pos_ /= 2;
@@ -754,7 +754,7 @@
// Scans all of the objects
virtual void Run(Thread* self ATTRIBUTE_UNUSED)
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScanObjectParallelVisitor visitor(this);
// TODO: Tune this.
static const size_t kFifoSize = 4;
@@ -1069,7 +1069,7 @@
virtual mirror::Object* IsMarked(mirror::Object* obj)
OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
mark_sweep_->VerifyIsLive(obj);
return obj;
}
@@ -1102,7 +1102,7 @@
}
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_) {
for (size_t i = 0; i < count; ++i) {
mark_sweep_->MarkObjectNonNullParallel(*roots[i]);
@@ -1112,7 +1112,7 @@
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_) {
for (size_t i = 0; i < count; ++i) {
mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr());
@@ -1311,7 +1311,7 @@
MemberOffset offset,
bool is_static ATTRIBUTE_UNUSED) const
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kCheckLocks) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
@@ -1321,7 +1321,7 @@
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
@@ -1329,7 +1329,7 @@
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kCheckLocks) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 9747031..bbac9da 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -60,14 +60,14 @@
virtual void RunPhases() OVERRIDE REQUIRES(!mark_stack_lock_);
void InitializePhase();
- void MarkingPhase() REQUIRES(!mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ void MarkingPhase() REQUIRES(!mark_stack_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void PausePhase() REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
- void ReclaimPhase() REQUIRES(!mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ void ReclaimPhase() REQUIRES(!mark_stack_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void FinishPhase();
virtual void MarkReachableObjects()
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsConcurrent() const {
return is_concurrent_;
@@ -85,71 +85,71 @@
void Init();
// Find the default mark bitmap.
- void FindDefaultSpaceBitmap() SHARED_REQUIRES(Locks::mutator_lock_);
+ void FindDefaultSpaceBitmap() REQUIRES_SHARED(Locks::mutator_lock_);
// Marks all objects in the root set at the start of a garbage collection.
void MarkRoots(Thread* self)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void MarkNonThreadRoots()
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void MarkConcurrentRoots(VisitRootFlags flags)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void MarkRootsCheckpoint(Thread* self, bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Builds a mark stack and recursively mark until it empties.
void RecursiveMark()
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie
// the image. Mark that portion of the heap as immune.
- virtual void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_);
+ virtual void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_);
// Builds a mark stack with objects on dirty cards and recursively mark until it empties.
void RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Remarks the root set after completing the concurrent mark.
void ReMarkRoots()
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ProcessReferences(Thread* self)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Update and mark references from immune spaces.
void UpdateAndMarkModUnion()
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Pre clean cards to reduce how much work is needed in the pause.
void PreCleanCards()
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Sweeps unmarked objects to complete the garbage collection. Virtual as by default it sweeps
// all allocation spaces. Partial and sticky GCs want to just sweep a subset of the heap.
virtual void Sweep(bool swap_bitmaps)
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Sweeps unmarked objects to complete the garbage collection.
void SweepLargeObjects(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_);
@@ -157,13 +157,13 @@
// Sweep only pointers within an array. WARNING: Trashes objects.
void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Blackens an object.
void ScanObject(mirror::Object* obj)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// No thread safety analysis due to lambdas.
template<typename MarkVisitor, typename ReferenceVisitor>
@@ -172,53 +172,53 @@
const ReferenceVisitor& ref_visitor)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SweepSystemWeaks(Thread* self)
REQUIRES(!Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static mirror::Object* VerifySystemWeakIsLiveCallback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
void VerifySystemWeaks()
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// Verify that an object is live, either in a live bitmap or in the allocation stack.
void VerifyIsLive(const mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
size_t count,
const RootInfo& info) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Marks an object.
virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void MarkObject(mirror::Object* obj, mirror::Object* holder, MemberOffset offset)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
Barrier& GetBarrier() {
return *gc_barrier_;
@@ -226,24 +226,24 @@
// Schedules an unmarked object for reference processing.
void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
protected:
// Returns object if the object is marked in the heap bitmap, otherwise null.
virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
void MarkObjectNonNull(mirror::Object* obj,
mirror::Object* holder = nullptr,
MemberOffset offset = MemberOffset(0))
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Marks an object atomically, safe to use from multiple threads.
void MarkObjectNonNullParallel(mirror::Object* obj)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if we need to add obj to a mark stack.
bool MarkObjectParallel(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
@@ -251,16 +251,16 @@
// Verify the roots of the heap and print out information related to any invalid roots.
// Called in MarkObject, so we may not hold the mutator lock.
void VerifySuspendedThreadRoots()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Expand mark stack to 2x its current size.
void ExpandMarkStack()
REQUIRES(mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ResizeMarkStack(size_t new_size)
REQUIRES(mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns how many threads we should use for the current GC phase based on whether we are
// paused and whether or not we care about pauses.
@@ -269,19 +269,19 @@
// Push a single reference on a mark stack.
void PushOnMarkStack(mirror::Object* obj)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Blackens objects grayed during a garbage collection.
void ScanGrayObjects(bool paused, uint8_t minimum_age)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
virtual void ProcessMarkStack()
OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ProcessMarkStack(false);
}
@@ -289,12 +289,12 @@
void ProcessMarkStack(bool paused)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ProcessMarkStackParallel(size_t thread_count)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Used to get around thread safety annotations. The call is from MarkingPhase and is guarded by
// IsExclusiveHeld.
diff --git a/runtime/gc/collector/partial_mark_sweep.h b/runtime/gc/collector/partial_mark_sweep.h
index e9b4f6f..8b0d3dd 100644
--- a/runtime/gc/collector/partial_mark_sweep.h
+++ b/runtime/gc/collector/partial_mark_sweep.h
@@ -37,7 +37,7 @@
// Bind the live bits to the mark bits of bitmaps for spaces that aren't collected for partial
// collections, ie the Zygote space. Also mark this space as immune. Virtual as overridden by
// StickyMarkSweep.
- virtual void BindBitmaps() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ virtual void BindBitmaps() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PartialMarkSweep);
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 7a4c025..47e6ca3 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -265,16 +265,20 @@
RecordFree(ObjectBytePair(from_objects - to_objects, from_bytes - to_bytes));
// Clear and protect the from space.
from_space_->Clear();
- if (kProtectFromSpace && !from_space_->IsRosAllocSpace()) {
- // Protect with PROT_NONE.
- VLOG(heap) << "Protecting from_space_ : " << *from_space_;
- from_space_->GetMemMap()->Protect(PROT_NONE);
- } else {
- // If RosAllocSpace, we'll leave it as PROT_READ here so the
- // rosaloc verification can read the metadata magic number and
- // protect it with PROT_NONE later in FinishPhase().
- VLOG(heap) << "Protecting from_space_ with PROT_READ : " << *from_space_;
- from_space_->GetMemMap()->Protect(PROT_READ);
+ // b/31172841. Temporarily disable the from-space protection for host debug builds
+ // due to a protection issue on the build server.
+ if (kProtectFromSpace && !(kIsDebugBuild && !kIsTargetBuild)) {
+ if (!from_space_->IsRosAllocSpace()) {
+ // Protect with PROT_NONE.
+ VLOG(heap) << "Protecting from_space_ : " << *from_space_;
+ from_space_->GetMemMap()->Protect(PROT_NONE);
+ } else {
+ // If RosAllocSpace, we'll leave it as PROT_READ here so the
+ // rosalloc verification can read the metadata magic number and
+ // protect it with PROT_NONE later in FinishPhase().
+ VLOG(heap) << "Protecting from_space_ with PROT_READ : " << *from_space_;
+ from_space_->GetMemMap()->Protect(PROT_READ);
+ }
}
heap_->PreSweepingGcVerification(this);
if (swap_semi_spaces_) {
@@ -289,7 +293,7 @@
: from_space_(from_space) {}
void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
- SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
+ REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
if (from_space_->HasAddress(ref)) {
Runtime::Current()->GetHeap()->DumpObject(LOG(INFO), obj);
@@ -382,7 +386,7 @@
live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
reinterpret_cast<uintptr_t>(space->End()),
[this](Object* obj)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
DCHECK(obj != nullptr);
VerifyNoFromSpaceReferences(obj);
});
@@ -790,9 +794,13 @@
void SemiSpace::FinishPhase() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
- if (kProtectFromSpace && from_space_->IsRosAllocSpace()) {
- VLOG(heap) << "Protecting from_space_ with PROT_NONE : " << *from_space_;
- from_space_->GetMemMap()->Protect(PROT_NONE);
+ // b/31172841. Temporarily disable the from-space protection for host debug builds
+ // due to a protection issue on the build server.
+ if (kProtectFromSpace && !(kIsDebugBuild && !kIsTargetBuild)) {
+ if (from_space_->IsRosAllocSpace()) {
+ VLOG(heap) << "Protecting from_space_ with PROT_NONE : " << *from_space_;
+ from_space_->GetMemMap()->Protect(PROT_NONE);
+ }
}
// Null the "to" and "from" spaces since compacting from one to the other isn't valid until
// further action is done by the heap.
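The semi-space hunks above keep the existing PROT_NONE/PROT_READ split for plain versus RosAlloc from-spaces, but now skip protection entirely on host debug builds (b/31172841). Below is a stand-alone sketch of that decision using raw mmap/mprotect, assuming Linux and treating the kProtectFromSpace/kIsDebugBuild/kIsTargetBuild constants as illustrative placeholders for ART's build flags.

#include <sys/mman.h>
#include <cassert>
#include <cstddef>

constexpr bool kProtectFromSpace = true;
constexpr bool kIsDebugBuild = false;   // assumption: a release build
constexpr bool kIsTargetBuild = true;   // assumption: device, not host

// After evacuation the from-space is made inaccessible so stale references
// fault immediately; RosAlloc spaces keep PROT_READ until FinishPhase() so
// verification code can still read the metadata magic numbers.
void ProtectFromSpace(void* begin, size_t size, bool is_rosalloc_space) {
  if (!kProtectFromSpace || (kIsDebugBuild && !kIsTargetBuild)) {
    return;  // mirrors the hunk above: no protection on host debug builds
  }
  int rc = mprotect(begin, size, is_rosalloc_space ? PROT_READ : PROT_NONE);
  assert(rc == 0);
  (void)rc;
}

int main() {
  const size_t kPage = 4096;
  void* space = mmap(nullptr, kPage, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(space != MAP_FAILED);
  ProtectFromSpace(space, kPage, /*is_rosalloc_space=*/false);
  munmap(space, kPage);
  return 0;
}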
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 694e536..4b63d9b 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -117,7 +117,7 @@
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
void VerifyNoFromSpaceReferences(mirror::Object* obj)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Marks the root set at the start of a garbage collection.
void MarkRoots()
@@ -125,7 +125,7 @@
// Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie
// the image. Mark that portion of the heap as immune.
- virtual void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_)
+ virtual void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::heap_bitmap_lock_);
void UnBindBitmaps()
@@ -137,13 +137,13 @@
// Sweeps unmarked objects to complete the garbage collection.
virtual void Sweep(bool swap_bitmaps)
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Sweeps unmarked objects to complete the garbage collection.
void SweepLargeObjects(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_);
void SweepSystemWeaks()
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
@@ -157,44 +157,44 @@
// Schedules an unmarked object for reference processing.
void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
protected:
// Returns null if the object is not marked, otherwise returns the forwarding address (same as
// object for non movable things).
virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
REQUIRES(Locks::mutator_lock_)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_);
virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) OVERRIDE
REQUIRES(Locks::mutator_lock_)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_);
// Marks or unmarks a large object based on whether or not set is true. If set is true, then we
// mark, otherwise we unmark.
bool MarkLargeObject(const mirror::Object* obj)
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Expand mark stack to 2x its current size.
- void ResizeMarkStack(size_t new_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ void ResizeMarkStack(size_t new_size) REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if we should sweep the space.
virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const;
// Push an object onto the mark stack.
- void MarkStackPush(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ void MarkStackPush(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
void UpdateAndMarkModUnion()
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Recursively blackens objects on the mark stack.
void ProcessMarkStack()
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
inline mirror::Object* GetForwardingAddressInFromSpace(mirror::Object* obj) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Revoke all the thread-local buffers.
void RevokeAllThreadLocalBuffers();
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index abaf978..100ca64 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -36,17 +36,17 @@
protected:
// Bind the live bits to the mark bits of bitmaps for all spaces, all spaces other than the
// alloc space will be marked as immune.
- void BindBitmaps() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ void BindBitmaps() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
void MarkReachableObjects()
OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void Sweep(bool swap_bitmaps)
OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(StickyMarkSweep);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 4d16b6e..600aff1 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1704,7 +1704,7 @@
return nullptr;
}
-static inline bool EntrypointsInstrumented() SHARED_REQUIRES(Locks::mutator_lock_) {
+static inline bool EntrypointsInstrumented() REQUIRES_SHARED(Locks::mutator_lock_) {
instrumentation::Instrumentation* const instrumentation =
Runtime::Current()->GetInstrumentation();
return instrumentation != nullptr && instrumentation->AllocEntrypointsInstrumented();
@@ -1930,11 +1930,11 @@
InstanceCounter(const std::vector<mirror::Class*>& classes,
bool use_is_assignable_from,
uint64_t* counts)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {}
static void Callback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
InstanceCounter* instance_counter = reinterpret_cast<InstanceCounter*>(arg);
mirror::Class* instance_class = obj->GetClass();
CHECK(instance_class != nullptr);
@@ -1966,11 +1966,11 @@
class InstanceCollector {
public:
InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: class_(c), max_count_(max_count), instances_(instances) {
}
static void Callback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
DCHECK(arg != nullptr);
InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
if (obj->GetClass() == instance_collector->class_) {
@@ -2000,12 +2000,12 @@
ReferringObjectsFinder(mirror::Object* object,
int32_t max_count,
std::vector<mirror::Object*>& referring_objects)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: object_(object), max_count_(max_count), referring_objects_(referring_objects) {
}
static void Callback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
reinterpret_cast<ReferringObjectsFinder*>(arg)->operator()(obj);
}
@@ -2018,7 +2018,7 @@
// For Object::VisitReferences.
void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
if (ref == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
referring_objects_.push_back(obj);
@@ -2125,11 +2125,11 @@
}
void Heap::TransitionCollector(CollectorType collector_type) {
- // Collector transition must not happen with CC
- CHECK(!kUseReadBarrier);
if (collector_type == collector_type_) {
return;
}
+ // Collector transition must not happen with CC
+ CHECK(!kUseReadBarrier);
VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
<< " -> " << static_cast<int>(collector_type);
uint64_t start_time = NanoTime();
@@ -2374,7 +2374,7 @@
const bool is_running_on_memory_tool_;
static void Callback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(arg != nullptr);
BinContext* context = reinterpret_cast<BinContext*>(arg);
ZygoteCompactingCollector* collector = context->collector_;
@@ -2571,7 +2571,7 @@
zygote_space_->GetLiveBitmap()->VisitMarkedRange(
reinterpret_cast<uintptr_t>(zygote_space_->Begin()),
reinterpret_cast<uintptr_t>(zygote_space_->Limit()),
- [](mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+ [](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(obj->AtomicSetMarkBit(0, 1));
});
}
@@ -2891,7 +2891,7 @@
explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
void VisitRoot(mirror::Object* root, const RootInfo& info)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (root == obj_) {
LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
}
@@ -2913,7 +2913,7 @@
class VerifyReferenceVisitor : public SingleRootVisitor {
public:
VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
: heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
size_t GetFailureCount() const {
@@ -2921,14 +2921,14 @@
}
void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (verify_referent_) {
VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset());
}
}
void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
VerifyReference(obj, obj->GetFieldObject<mirror::Object>(offset), offset);
}
@@ -2937,19 +2937,19 @@
}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const_cast<VerifyReferenceVisitor*>(this)->VisitRoot(
root->AsMirrorPtr(), RootInfo(kRootVMInternal));
}
virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (root == nullptr) {
LOG(ERROR) << "Root is null with info " << root_info.GetType();
} else if (!VerifyReference(nullptr, root, MemberOffset(0))) {
@@ -3066,7 +3066,7 @@
: heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
void operator()(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
// Note: we are verifying the references in obj but not obj itself, this is because obj must
// be live or else how did we find it in the live bitmap?
VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
@@ -3075,12 +3075,12 @@
}
static void VisitCallback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
visitor->operator()(obj);
}
- void VerifyRoots() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) {
+ void VerifyRoots() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) {
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
Runtime::Current()->VisitRoots(&visitor);
@@ -3172,7 +3172,7 @@
class VerifyReferenceCardVisitor {
public:
VerifyReferenceCardVisitor(Heap* heap, bool* failed)
- SHARED_REQUIRES(Locks::mutator_lock_,
+ REQUIRES_SHARED(Locks::mutator_lock_,
Locks::heap_bitmap_lock_)
: heap_(heap), failed_(failed) {
}
@@ -3250,7 +3250,7 @@
failed_(false) {}
void operator()(mirror::Object* obj) const
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
obj->VisitReferences(visitor, VoidFunctor());
}
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index b357b87..10bebef 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -195,7 +195,7 @@
mirror::Class* klass,
size_t num_bytes,
const PreFenceVisitor& pre_fence_visitor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_,
!Roles::uninterruptible_) {
return AllocObjectWithAllocator<kInstrumented, true>(
@@ -207,7 +207,7 @@
mirror::Class* klass,
size_t num_bytes,
const PreFenceVisitor& pre_fence_visitor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_,
!Roles::uninterruptible_) {
return AllocObjectWithAllocator<kInstrumented, true>(
@@ -220,7 +220,7 @@
size_t byte_count,
AllocatorType allocator,
const PreFenceVisitor& pre_fence_visitor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_,
!Roles::uninterruptible_);
@@ -234,13 +234,13 @@
// Visit all of the live objects in the heap.
void VisitObjects(ObjectCallback callback, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
void VisitObjectsPaused(ObjectCallback callback, void* arg)
REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
void CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !native_histogram_lock_);
@@ -274,12 +274,12 @@
// A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
// and doesn't abort on error, allowing the caller to report more
// meaningful diagnostics.
- bool IsValidObjectAddress(const mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsValidObjectAddress(const mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
// Faster alternative to IsHeapAddress since finding if an object is in the large object space is
// very slow.
bool IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
// Requires the heap lock to be held.
@@ -287,10 +287,10 @@
bool search_allocation_stack = true,
bool search_live_stack = true,
bool sorted = false)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Returns true if there is any chance that the object (obj) will move.
- bool IsMovableObject(const mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsMovableObject(const mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
// Enables us to temporarily disable compacting GC until objects are released.
void IncrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
@@ -306,7 +306,7 @@
// Mutator lock is required for GetContinuousSpaces.
void ClearMarkedObjects()
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Initiates an explicit garbage collection.
void CollectGarbage(bool clear_soft_references)
@@ -323,17 +323,17 @@
bool use_is_assignable_from,
uint64_t* counts)
REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Implements JDWP RT_Instances.
void GetInstances(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Implements JDWP OR_ReferringObjects.
void GetReferringObjects(mirror::Object* o,
int32_t max_count,
std::vector<mirror::Object*>& referring_objects)
REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
// implement dalvik.system.VMRuntime.clearGrowthLimit.
@@ -386,7 +386,7 @@
}
const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return continuous_spaces_;
}
@@ -527,12 +527,12 @@
// spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
// TODO: consider using a faster data structure like a binary tree.
space::ContinuousSpace* FindContinuousSpaceFromObject(const mirror::Object*, bool fail_ok) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(const mirror::Object*,
bool fail_ok) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);
@@ -550,15 +550,15 @@
void RosAllocVerification(TimingLogger* timings, const char* name)
REQUIRES(Locks::mutator_lock_);
- accounting::HeapBitmap* GetLiveBitmap() SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
+ accounting::HeapBitmap* GetLiveBitmap() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
return live_bitmap_.get();
}
- accounting::HeapBitmap* GetMarkBitmap() SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
+ accounting::HeapBitmap* GetMarkBitmap() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
return mark_bitmap_.get();
}
- accounting::ObjectStack* GetLiveStack() SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
+ accounting::ObjectStack* GetLiveStack() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
return live_stack_.get();
}
@@ -566,7 +566,7 @@
// Mark and empty stack.
void FlushAllocStack()
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_);
// Revoke all the thread-local allocation stacks.
@@ -579,18 +579,18 @@
accounting::SpaceBitmap<kObjectAlignment>* bitmap2,
accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects,
accounting::ObjectStack* stack)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_);
// Mark the specified allocation stack as live.
void MarkAllocStackAsLive(accounting::ObjectStack* stack)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_);
// Unbind any bound bitmaps.
void UnBindBitmaps()
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the boot image spaces. There may be multiple boot image spaces.
const std::vector<space::ImageSpace*>& GetBootImageSpaces() const {
@@ -598,10 +598,10 @@
}
bool ObjectIsInBootImageSpace(mirror::Object* obj) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsInBootImageOatFile(const void* p) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void GetBootImagesSize(uint32_t* boot_image_begin,
uint32_t* boot_image_end,
@@ -621,7 +621,7 @@
// Return the corresponding rosalloc space.
space::RosAllocSpace* GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
space::MallocSpace* GetNonMovingSpace() const {
return non_moving_space_;
@@ -646,8 +646,8 @@
}
}
- void DumpSpaces(std::ostream& stream) const SHARED_REQUIRES(Locks::mutator_lock_);
- std::string DumpSpaces() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void DumpSpaces(std::ostream& stream) const REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string DumpSpaces() const REQUIRES_SHARED(Locks::mutator_lock_);
// Dump object should only be used by the signal handler.
void DumpObject(std::ostream& stream, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
@@ -759,23 +759,23 @@
REQUIRES(Locks::alloc_tracker_lock_);
void VisitAllocationRecords(RootVisitor* visitor) const
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::alloc_tracker_lock_);
void SweepAllocationRecords(IsMarkedVisitor* visitor) const
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::alloc_tracker_lock_);
void DisallowNewAllocationRecords() const
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::alloc_tracker_lock_);
void AllowNewAllocationRecords() const
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::alloc_tracker_lock_);
void BroadcastForNewAllocationRecords() const
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::alloc_tracker_lock_);
void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);
@@ -831,11 +831,11 @@
collector_type == kCollectorTypeHomogeneousSpaceCompact;
}
bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE void CheckConcurrentGC(Thread* self,
size_t new_num_bytes_allocated,
mirror::Object** obj)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
accounting::ObjectStack* GetMarkStack() {
@@ -848,7 +848,7 @@
mirror::Class** klass,
size_t byte_count,
const PreFenceVisitor& pre_fence_visitor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);
// Handles Allocate()'s slow allocation path with GC involved after
@@ -862,14 +862,14 @@
size_t* bytes_tl_bulk_allocated,
mirror::Class** klass)
REQUIRES(!Locks::thread_suspend_count_lock_, !*gc_complete_lock_, !*pending_task_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Allocate into a specific space.
mirror::Object* AllocateInto(Thread* self,
space::AllocSpace* space,
mirror::Class* c,
size_t bytes)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
// wrong space.
@@ -884,17 +884,17 @@
size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <bool kGrow>
ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);
// Returns true if the address passed in is within the address range of a continuous space.
bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Run the finalizers. If timeout is non zero, then we use the VMRuntime version.
void RunFinalization(JNIEnv* env, uint64_t timeout);
@@ -908,7 +908,7 @@
REQUIRES(!*pending_task_lock_);
void RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*pending_task_lock_);
bool IsGCRequestPending() const;
@@ -964,10 +964,10 @@
size_t GetPercentFree();
static void VerificationCallback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_);
// Swap the allocation stack with the live stack.
- void SwapStacks() SHARED_REQUIRES(Locks::mutator_lock_);
+ void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
// Clear cards and update the mod union table. When process_alloc_space_cards is true,
// if clear_alloc_space_cards is true, then we clear cards instead of ageing them. We do
@@ -976,17 +976,17 @@
bool use_rem_sets,
bool process_alloc_space_cards,
bool clear_alloc_space_cards)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Push an object onto the allocation stack.
void PushOnAllocationStack(Thread* self, mirror::Object** obj)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
void PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, mirror::Object** obj)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
void ClearConcurrentGCRequest();
@@ -1008,7 +1008,7 @@
void TrimIndirectReferenceTables(Thread* self);
void VisitObjectsInternal(ObjectCallback callback, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
void VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg)
REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
@@ -1017,7 +1017,7 @@
// GC stress mode attempts to do one GC per unique backtrace.
void CheckGcStressMode(Thread* self, mirror::Object** obj)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);
// All-known continuous spaces, where objects lie within fixed bounds.
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index d9dfedb..4788f8a 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -48,34 +48,34 @@
explicit ReferenceProcessor();
void ProcessReferences(bool concurrent, TimingLogger* timings, bool clear_soft_references,
gc::collector::GarbageCollector* collector)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!Locks::reference_processor_lock_);
// The slow path bool is contained in the reference class object and can only be set once.
// Only allow setting this with mutators suspended so that we can avoid using a lock in the
// GetReferent fast path as an optimization.
- void EnableSlowPath() SHARED_REQUIRES(Locks::mutator_lock_);
+ void EnableSlowPath() REQUIRES_SHARED(Locks::mutator_lock_);
void BroadcastForSlowPath(Thread* self);
// Decode the referent, may block if references are being processed.
mirror::Object* GetReferent(Thread* self, mirror::Reference* reference)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::reference_processor_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::reference_processor_lock_);
void EnqueueClearedReferences(Thread* self) REQUIRES(!Locks::mutator_lock_);
void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
collector::GarbageCollector* collector)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void UpdateRoots(IsMarkedVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// Make a circular list with reference if it is not enqueued. Uses the finalizer queue lock.
bool MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::reference_processor_lock_,
!Locks::reference_queue_finalizer_references_lock_);
private:
- bool SlowPathEnabled() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool SlowPathEnabled() REQUIRES_SHARED(Locks::mutator_lock_);
// Called by ProcessReferences.
void DisableSlowPath(Thread* self) REQUIRES(Locks::reference_processor_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// If we are preserving references, it means that some dead objects may become live. We use start
// and stop preserving to block mutators calling GetReferent from getting access to these
// referents.
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 04d3454..1de1aa1 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -55,35 +55,35 @@
// threads since it uses a lock to avoid a race between checking for the reference's presence and
// adding it.
void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*lock_);
// Enqueue a reference. The reference must be unprocessed.
// Not thread safe, used when mutators are paused to minimize lock overhead.
- void EnqueueReference(mirror::Reference* ref) SHARED_REQUIRES(Locks::mutator_lock_);
+ void EnqueueReference(mirror::Reference* ref) REQUIRES_SHARED(Locks::mutator_lock_);
// Dequeue a reference from the queue and return that dequeued reference.
- mirror::Reference* DequeuePendingReference() SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Reference* DequeuePendingReference() REQUIRES_SHARED(Locks::mutator_lock_);
// Enqueues finalizer references with white referents. White referents are blackened, moved to
// the zombie field, and the referent field is cleared.
void EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
collector::GarbageCollector* collector)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Walks the reference list marking any references subject to the reference clearing policy.
// References with a black referent are removed from the list. References with white referents
// biased toward saving are blackened and also removed from the list.
void ForwardSoftReferences(MarkObjectVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Unlink the reference list, clearing reference objects with white referents. Cleared references
// registered to a reference queue are scheduled for appending by the heap worker thread.
void ClearWhiteReferences(ReferenceQueue* cleared_references,
collector::GarbageCollector* collector)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- void Dump(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_);
- size_t GetLength() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void Dump(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
+ size_t GetLength() const REQUIRES_SHARED(Locks::mutator_lock_);
bool IsEmpty() const {
return list_ == nullptr;
@@ -91,13 +91,13 @@
void Clear() {
list_ = nullptr;
}
- mirror::Reference* GetList() SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Reference* GetList() REQUIRES_SHARED(Locks::mutator_lock_) {
return list_;
}
// Visits list_, currently only used for the mark compact GC.
void UpdateRoots(IsMarkedVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
// Lock, used for parallel GC reference enqueuing. It allows for multiple threads simultaneously
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index 2263797..45cea5a 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -87,7 +87,7 @@
}
inline size_t BumpPointerSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
size_t num_bytes = obj->SizeOf();
if (usable_size != nullptr) {
*usable_size = RoundUp(num_bytes, kAlignment);
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 0e27d84..e9982e9 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -58,7 +58,7 @@
// Return the storage space required by obj.
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return AllocationSizeNonvirtual(obj, usable_size);
}
@@ -72,7 +72,7 @@
}
size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Removes the fork time growth limit on capacity, allowing the application to allocate up to the
// maximum reserved size of the heap.
@@ -110,9 +110,9 @@
void AssertAllThreadLocalBuffersAreRevoked()
REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_);
- uint64_t GetBytesAllocated() SHARED_REQUIRES(Locks::mutator_lock_)
+ uint64_t GetBytesAllocated() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
- uint64_t GetObjectsAllocated() SHARED_REQUIRES(Locks::mutator_lock_)
+ uint64_t GetObjectsAllocated() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
bool IsEmpty() const {
return Begin() == End();
@@ -132,7 +132,7 @@
// Return the object which comes after obj, while ensuring alignment.
static mirror::Object* GetNextObject(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Allocate a new TLAB, returns false if the allocation failed.
bool AllocNewTlab(Thread* self, size_t bytes) REQUIRES(!block_lock_);
@@ -143,7 +143,7 @@
// Go through all of the blocks and visit the continuous objects.
void Walk(ObjectCallback* callback, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!block_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!block_lock_);
accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE;
@@ -154,7 +154,7 @@
}
void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Object alignment within the space.
static constexpr size_t kAlignment = 8;
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 455d28e..9282ec7 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -319,7 +319,7 @@
namespace allocator {
// Implement the dlmalloc morecore callback.
-void* ArtDlMallocMoreCore(void* mspace, intptr_t increment) SHARED_REQUIRES(Locks::mutator_lock_) {
+void* ArtDlMallocMoreCore(void* mspace, intptr_t increment) REQUIRES_SHARED(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
Heap* heap = runtime->GetHeap();
::art::gc::space::DlMallocSpace* dlmalloc_space = heap->GetDlMallocSpace();
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index eab757a..8fb2d76 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -65,11 +65,11 @@
// Virtual to allow MemoryToolMallocSpace to intercept.
virtual size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Virtual to allow MemoryToolMallocSpace to intercept.
virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE {
return num_bytes;
@@ -136,7 +136,7 @@
}
void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
protected:
DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::string& name, void* mspace,
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index c87312b..e41c532 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -456,7 +456,7 @@
bool is_global_cache,
bool validate_oat_file,
std::string* error_msg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Note that we must not use the file descriptor associated with
// ScopedFlock::GetFile to Init the image file. We want the file
// descriptor (and the associated exclusive lock) to be released when
@@ -492,7 +492,7 @@
bool validate_oat_file,
const OatFile* oat_file,
std::string* error_msg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(image_filename != nullptr);
CHECK(image_location != nullptr);
@@ -865,14 +865,14 @@
explicit FixupRootVisitor(Args... args) : FixupVisitor(args...) {}
ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
}
ALWAYS_INLINE void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* ref = root->AsMirrorPtr();
mirror::Object* new_ref = ForwardObject(ref);
if (ref != new_ref) {
@@ -936,7 +936,7 @@
// java.lang.ref.Reference visitor.
void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
mirror::Object* obj = ref->GetReferent<kWithoutReadBarrier>();
ref->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
mirror::Reference::ReferentOffset(),
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 534232d..c407259 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -47,13 +47,13 @@
InstructionSet image_isa,
bool secondary_image,
std::string* error_msg)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Try to open an existing app image space.
static std::unique_ptr<ImageSpace> CreateFromAppImage(const char* image,
const OatFile* oat_file,
std::string* error_msg)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Reads the image header from the specified image location for the
// instruction set image_isa. Returns null on failure, with
@@ -70,7 +70,7 @@
std::unique_ptr<const OatFile> ReleaseOatFile();
void VerifyImageAllocations()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const ImageHeader& GetImageHeader() const {
return *reinterpret_cast<ImageHeader*>(Begin());
@@ -158,7 +158,7 @@
bool validate_oat_file,
const OatFile* oat_file,
std::string* error_msg)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static Atomic<uint32_t> bitmap_index_;
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index c726998..0320e79 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -96,7 +96,7 @@
return Begin() <= byte_obj && byte_obj < End();
}
void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Return true if the large object is a zygote large object. Potentially slow.
virtual bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const = 0;
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index c6b2870..f85ea46 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -63,9 +63,9 @@
// amount of the storage space that may be used by obj.
virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;
virtual size_t Free(Thread* self, mirror::Object* ptr)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Returns the maximum bytes that could be allocated for the given
// size in bulk, that is the maximum value for the
@@ -160,7 +160,7 @@
size_t maximum_size, bool low_memory_mode) = 0;
virtual void RegisterRecentFree(mirror::Object* ptr)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(lock_);
virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() {
@@ -196,7 +196,7 @@
private:
static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
DISALLOW_COPY_AND_ASSIGN(MallocSpace);
};
diff --git a/runtime/gc/space/memory_tool_malloc_space.h b/runtime/gc/space/memory_tool_malloc_space.h
index c081011..e53f009 100644
--- a/runtime/gc/space/memory_tool_malloc_space.h
+++ b/runtime/gc/space/memory_tool_malloc_space.h
@@ -43,10 +43,10 @@
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE;
size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void RegisterRecentFree(mirror::Object* ptr ATTRIBUTE_UNUSED) OVERRIDE {}
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 823aa38..4e57a85 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -62,11 +62,11 @@
// Return the storage space required by obj.
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_) {
return AllocationSizeNonvirtual(obj, usable_size);
}
size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);
size_t Free(Thread*, mirror::Object*) OVERRIDE {
UNIMPLEMENTED(FATAL);
@@ -163,7 +163,7 @@
return nullptr;
}
void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);
// Object alignment within the space.
static constexpr size_t kAlignment = kObjectAlignment;
@@ -503,7 +503,7 @@
}
mirror::Object* GetNextObject(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index b016095..8ccbfaa 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -379,7 +379,7 @@
// Callback from rosalloc when it needs to increase the footprint.
void* ArtRosAllocMoreCore(allocator::RosAlloc* rosalloc, intptr_t increment)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Heap* heap = Runtime::Current()->GetHeap();
art::gc::space::RosAllocSpace* rosalloc_space = heap->GetRosAllocSpace(rosalloc);
DCHECK(rosalloc_space != nullptr);
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index b175fbf..f9c7dbc 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -64,9 +64,9 @@
return AllocationSizeNonvirtual<true>(obj, usable_size);
}
size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 20ef44a..bd600fe 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -52,7 +52,7 @@
heap->SetSpaceAsDefault(space);
}
- mirror::Class* GetByteArrayClass(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Class* GetByteArrayClass(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
if (byte_array_class_ == nullptr) {
@@ -71,7 +71,7 @@
size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
mirror::Object* obj = alloc_space->Alloc(self,
@@ -91,7 +91,7 @@
size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size,
@@ -103,7 +103,7 @@
}
void InstallClass(mirror::Object* o, mirror::Class* byte_array_class, size_t size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Note the minimum size, which is the size of a zero-length byte array.
EXPECT_GE(size, SizeOfZeroLengthByteArray());
EXPECT_TRUE(byte_array_class != nullptr);
@@ -354,7 +354,7 @@
#define TEST_SizeFootPrintGrowthLimitAndTrimRandom(name, spaceName, spaceFn, size) \
TEST_F(spaceName##RandomTest, SizeFootPrintGrowthLimitAndTrim_RandomAllocationsWithMax_##name) { \
- SizeFootPrintGrowthLimitAndTrimDriver(-size, spaceFn); \
+ SizeFootPrintGrowthLimitAndTrimDriver(-(size), spaceFn); \
}
#define TEST_SPACE_CREATE_FN_STATIC(spaceName, spaceFn) \
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index f2889e2..4d10de8 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -33,7 +33,7 @@
static ZygoteSpace* Create(const std::string& name, MemMap* mem_map,
accounting::ContinuousSpaceBitmap* live_bitmap,
accounting::ContinuousSpaceBitmap* mark_bitmap)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void Dump(std::ostream& os) const;
@@ -77,7 +77,7 @@
}
void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
protected:
virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() {
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index 0304d0d..0a98f55 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -91,24 +91,24 @@
// Single root version, not overridable.
ALWAYS_INLINE void VisitRoot(mirror::Object** root, const RootInfo& info)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
VisitRoots(&root, 1, info);
}
// Single root version, not overridable.
ALWAYS_INLINE void VisitRootIfNonNull(mirror::Object** root, const RootInfo& info)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (*root != nullptr) {
VisitRoot(root, info);
}
}
virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
// Only visits roots one at a time, doesn't handle updating roots. Used when performance isn't
@@ -116,7 +116,7 @@
class SingleRootVisitor : public RootVisitor {
private:
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
VisitRoot(*roots[i], info);
}
@@ -124,7 +124,7 @@
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
VisitRoot(roots[i]->AsMirrorPtr(), info);
}
@@ -169,10 +169,10 @@
public:
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE MirrorType* Read(GcRootSource* gc_root_source = nullptr) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void VisitRoot(RootVisitor* visitor, const RootInfo& info) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!IsNull());
mirror::CompressedReference<mirror::Object>* roots[1] = { &root_ };
visitor->VisitRoots(roots, 1u, info);
@@ -180,7 +180,7 @@
}
void VisitRootIfNonNull(RootVisitor* visitor, const RootInfo& info) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!IsNull()) {
VisitRoot(visitor, info);
}
@@ -196,7 +196,7 @@
}
ALWAYS_INLINE GcRoot() {}
- explicit ALWAYS_INLINE GcRoot(MirrorType* ref) SHARED_REQUIRES(Locks::mutator_lock_);
+ explicit ALWAYS_INLINE GcRoot(MirrorType* ref) REQUIRES_SHARED(Locks::mutator_lock_);
private:
// Root visitors take pointers to root_ and place them in CompressedReference** arrays. We use a
@@ -223,7 +223,7 @@
template <class MirrorType>
ALWAYS_INLINE void VisitRootIfNonNull(GcRoot<MirrorType>& root)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root.IsNull()) {
VisitRoot(root);
}
@@ -231,27 +231,27 @@
template <class MirrorType>
ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
}
template <class MirrorType>
- void VisitRoot(GcRoot<MirrorType>& root) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void VisitRoot(GcRoot<MirrorType>& root) REQUIRES_SHARED(Locks::mutator_lock_) {
VisitRoot(root.AddressWithoutBarrier());
}
template <class MirrorType>
void VisitRoot(mirror::CompressedReference<MirrorType>* root)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(buffer_pos_ >= kBufferSize)) {
Flush();
}
roots_[buffer_pos_++] = root;
}
- void Flush() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Flush() REQUIRES_SHARED(Locks::mutator_lock_) {
visitor_->VisitRoots(roots_, buffer_pos_, root_info_);
buffer_pos_ = 0;
}
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
index 5c98ea6..03f5bf6 100644
--- a/runtime/generated/asm_support_gen.h
+++ b/runtime/generated/asm_support_gen.h
@@ -80,8 +80,8 @@
DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_SIZE_MINUS_ONE), (static_cast<int32_t>(art::mirror::DexCache::kDexCacheStringCacheSize - 1)))
#define STRING_DEX_CACHE_HASH_BITS 10
DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_HASH_BITS), (static_cast<int32_t>(art::LeastSignificantBit(art::mirror::DexCache::kDexCacheStringCacheSize))))
-#define STRING_DEX_CACHE_ELEMENT_SIZE 0x8
-DEFINE_CHECK_EQ(static_cast<size_t>(STRING_DEX_CACHE_ELEMENT_SIZE), (static_cast<size_t>(sizeof(art::mirror::StringDexCachePair))))
+#define STRING_DEX_CACHE_ELEMENT_SIZE 8
+DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_ELEMENT_SIZE), (static_cast<int32_t>(sizeof(art::mirror::StringDexCachePair))))
#define MIN_LARGE_OBJECT_THRESHOLD 0x3000
DEFINE_CHECK_EQ(static_cast<size_t>(MIN_LARGE_OBJECT_THRESHOLD), (static_cast<size_t>(art::gc::Heap::kMinLargeObjectThreshold)))
#define LOCK_WORD_STATE_SHIFT 30
diff --git a/runtime/globals.h b/runtime/globals.h
index 9045d40..aba5661 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -67,22 +67,30 @@
#if defined(ART_TARGET)
// Useful in conditionals where ART_TARGET isn't.
static constexpr bool kIsTargetBuild = true;
-#if defined(ART_TARGET_LINUX)
+# if defined(ART_TARGET_LINUX)
static constexpr bool kIsTargetLinux = true;
-#elif defined(ART_TARGET_ANDROID)
+# elif defined(ART_TARGET_ANDROID)
static constexpr bool kIsTargetLinux = false;
-#else
-#error "Either ART_TARGET_LINUX or ART_TARGET_ANDROID needs to be defined for target builds."
-#endif
+# else
+# error "Either ART_TARGET_LINUX or ART_TARGET_ANDROID needs to be defined for target builds."
+# endif
#else
static constexpr bool kIsTargetBuild = false;
-#if defined(ART_TARGET_LINUX)
-#error "ART_TARGET_LINUX defined for host build."
-#elif defined(ART_TARGET_ANDROID)
-#error "ART_TARGET_ANDROID defined for host build."
-#else
+# if defined(ART_TARGET_LINUX)
+# error "ART_TARGET_LINUX defined for host build."
+# elif defined(ART_TARGET_ANDROID)
+# error "ART_TARGET_ANDROID defined for host build."
+# else
static constexpr bool kIsTargetLinux = false;
+# endif
#endif
+
+// Are additional statically-linked ART host binaries (dex2oats,
+// oatdumps, etc.) built and available?
+#if !defined(ART_TARGET) && defined(ART_BUILD_HOST_STATIC)
+static constexpr bool kHostStaticBuildEnabled = true;
+#else
+static constexpr bool kHostStaticBuildEnabled = false;
#endif
// Garbage collector constants.
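The new kHostStaticBuildEnabled constant exposes to C++ code whether the additional statically linked ART host binaries (dex2oats, oatdumps, etc.) were built and are available. A minimal usage sketch follows; the function name and body are hypothetical illustrations, not part of this change:

    // Hypothetical sketch: gate host-only checks on the new constant.
    void MaybeExerciseStaticHostTools() {
      if (!art::kHostStaticBuildEnabled) {
        // The statically linked host binaries were not built in this configuration.
        return;
      }
      // ... locate and run the static dex2oat/oatdump binaries here ...
    }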
diff --git a/runtime/handle.h b/runtime/handle.h
index a415373..d4c13d4 100644
--- a/runtime/handle.h
+++ b/runtime/handle.h
@@ -20,8 +20,10 @@
#include "base/casts.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "base/mutex.h"
#include "base/value_object.h"
-#include "stack.h"
+#include "jni.h"
+#include "stack_reference.h"
namespace art {
@@ -50,19 +52,19 @@
ALWAYS_INLINE explicit Handle(StackReference<T>* reference) : reference_(reference) {
}
- ALWAYS_INLINE T& operator*() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE T& operator*() const REQUIRES_SHARED(Locks::mutator_lock_) {
return *Get();
}
- ALWAYS_INLINE T* operator->() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE T* operator->() const REQUIRES_SHARED(Locks::mutator_lock_) {
return Get();
}
- ALWAYS_INLINE T* Get() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE T* Get() const REQUIRES_SHARED(Locks::mutator_lock_) {
return down_cast<T*>(reference_->AsMirrorPtr());
}
- ALWAYS_INLINE jobject ToJObject() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE jobject ToJObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(reference_->AsMirrorPtr() == nullptr)) {
// Special case so that we work with null handles.
return nullptr;
@@ -106,22 +108,22 @@
}
ALWAYS_INLINE MutableHandle(const MutableHandle<T>& handle)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: Handle<T>(handle.reference_) {
}
ALWAYS_INLINE MutableHandle<T>& operator=(const MutableHandle<T>& handle)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Handle<T>::operator=(handle);
return *this;
}
ALWAYS_INLINE explicit MutableHandle(StackReference<T>* reference)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: Handle<T>(reference) {
}
- ALWAYS_INLINE T* Assign(T* reference) SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE T* Assign(T* reference) REQUIRES_SHARED(Locks::mutator_lock_) {
StackReference<mirror::Object>* ref = Handle<T>::GetReference();
T* old = down_cast<T*>(ref->AsMirrorPtr());
ref->Assign(reference);
@@ -129,12 +131,12 @@
}
template<typename S>
- explicit MutableHandle(const MutableHandle<S>& handle) SHARED_REQUIRES(Locks::mutator_lock_)
+ explicit MutableHandle(const MutableHandle<S>& handle) REQUIRES_SHARED(Locks::mutator_lock_)
: Handle<T>(handle) {
}
template<typename S>
- explicit MutableHandle(StackReference<S>* reference) SHARED_REQUIRES(Locks::mutator_lock_)
+ explicit MutableHandle(StackReference<S>* reference) REQUIRES_SHARED(Locks::mutator_lock_)
: Handle<T>(reference) {
}
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index 67d7054..37eed99 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -22,8 +22,9 @@
#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "base/mutex.h"
#include "handle.h"
-#include "stack.h"
+#include "stack_reference.h"
#include "verify_object.h"
namespace art {
@@ -61,15 +62,15 @@
}
ALWAYS_INLINE mirror::Object* GetReference(size_t i) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE Handle<mirror::Object> GetHandle(size_t i);
ALWAYS_INLINE MutableHandle<mirror::Object> GetMutableHandle(size_t i)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE bool Contains(StackReference<mirror::Object>* handle_scope_entry) const;
@@ -150,14 +151,14 @@
ALWAYS_INLINE ~StackHandleScope();
template<class T>
- ALWAYS_INLINE MutableHandle<T> NewHandle(T* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE MutableHandle<T> NewHandle(T* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<class T>
ALWAYS_INLINE HandleWrapper<T> NewHandleWrapper(T** object)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
Thread* Self() const {
return self_;
@@ -165,7 +166,7 @@
private:
template<class T>
- ALWAYS_INLINE MutableHandle<T> GetHandle(size_t i) SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE MutableHandle<T> GetHandle(size_t i) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_LT(i, kNumReferences);
return MutableHandle<T>(&GetReferences()[i]);
}
@@ -209,7 +210,7 @@
}
template<class T>
- MutableHandle<T> NewHandle(T* object) SHARED_REQUIRES(Locks::mutator_lock_) {
+ MutableHandle<T> NewHandle(T* object) REQUIRES_SHARED(Locks::mutator_lock_) {
if (scopes_.empty() || current_scope_num_refs_ >= kNumReferencesPerScope) {
StackHandleScope<kNumReferencesPerScope>* scope =
new StackHandleScope<kNumReferencesPerScope>(self_);
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 4005f05..921dde1 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -246,7 +246,7 @@
}
void AddIdList(mirror::ObjectArray<mirror::Object>* values)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const int32_t length = values->GetLength();
for (int32_t i = 0; i < length; ++i) {
AddObjectId(values->GetWithoutChecks(i));
@@ -489,23 +489,23 @@
private:
static void VisitObjectCallback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
DCHECK(arg != nullptr);
reinterpret_cast<Hprof*>(arg)->DumpHeapObject(obj);
}
void DumpHeapObject(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void DumpHeapClass(mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void DumpHeapArray(mirror::Array* obj, mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void DumpHeapInstanceObject(mirror::Object* obj, mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ProcessHeap(bool header_first)
REQUIRES(Locks::mutator_lock_) {
@@ -555,7 +555,7 @@
output_->EndRecord();
}
- void WriteClassTable() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void WriteClassTable() REQUIRES_SHARED(Locks::mutator_lock_) {
for (const auto& p : classes_) {
mirror::Class* c = p.first;
HprofClassSerialNumber sn = p.second;
@@ -604,11 +604,11 @@
}
void VisitRoot(mirror::Object* obj, const RootInfo& root_info)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
void MarkRootObject(const mirror::Object* obj, jobject jni_obj, HprofHeapTag heap_tag,
uint32_t thread_serial);
- HprofClassObjectId LookupClassId(mirror::Class* c) SHARED_REQUIRES(Locks::mutator_lock_) {
+ HprofClassObjectId LookupClassId(mirror::Class* c) REQUIRES_SHARED(Locks::mutator_lock_) {
if (c != nullptr) {
auto it = classes_.find(c);
if (it == classes_.end()) {
@@ -623,7 +623,7 @@
}
HprofStackTraceSerialNumber LookupStackTraceSerialNumber(const mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
auto r = allocation_records_.find(obj);
if (r == allocation_records_.end()) {
return kHprofNullStackTrace;
@@ -635,7 +635,7 @@
}
}
- HprofStringId LookupStringId(mirror::String* string) SHARED_REQUIRES(Locks::mutator_lock_) {
+ HprofStringId LookupStringId(mirror::String* string) REQUIRES_SHARED(Locks::mutator_lock_) {
return LookupStringId(string->ToModifiedUtf8());
}
@@ -653,7 +653,7 @@
return id;
}
- HprofStringId LookupClassNameId(mirror::Class* c) SHARED_REQUIRES(Locks::mutator_lock_) {
+ HprofStringId LookupClassNameId(mirror::Class* c) REQUIRES_SHARED(Locks::mutator_lock_) {
return LookupStringId(PrettyDescriptor(c));
}
@@ -681,7 +681,7 @@
__ AddU4(static_cast<uint32_t>(nowMs & 0xFFFFFFFF));
}
- void WriteStackTraces() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void WriteStackTraces() REQUIRES_SHARED(Locks::mutator_lock_) {
// Write a dummy stack trace record so the analysis tools don't freak out.
output_->StartNewRecord(HPROF_TAG_STACK_TRACE, kHprofTime);
__ AddStackTraceSerialNumber(kHprofNullStackTrace);
@@ -1072,14 +1072,14 @@
// Note that these don't have read barriers. It's OK, however, since the GC is guaranteed to not be
// running during the hprof dumping process.
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* obj = root->AsMirrorPtr();
// The two cases are either classes or dex cache arrays. If it is a dex cache array, then use
// VM internal. Otherwise the object is a declaring class of an ArtField or ArtMethod or a
diff --git a/runtime/image-inl.h b/runtime/image-inl.h
index 28620db..669649e 100644
--- a/runtime/image-inl.h
+++ b/runtime/image-inl.h
@@ -21,6 +21,7 @@
#include "art_method.h"
#include "imtable.h"
+#include "read_barrier-inl.h"
namespace art {
diff --git a/runtime/image.cc b/runtime/image.cc
index 6888183..7e6790a 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -20,6 +20,7 @@
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
+#include "utils.h"
namespace art {
@@ -179,4 +180,8 @@
}
}
+PointerSize ImageHeader::GetPointerSize() const {
+ return ConvertToPointerSize(pointer_size_);
+}
+
} // namespace art
diff --git a/runtime/image.h b/runtime/image.h
index 9ff18d6..3a4fa79 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -157,9 +157,7 @@
return reinterpret_cast<uint8_t*>(oat_file_end_);
}
- PointerSize GetPointerSize() const {
- return ConvertToPointerSize(pointer_size_);
- }
+ PointerSize GetPointerSize() const;
uint32_t GetPointerSizeUnchecked() const {
return pointer_size_;
@@ -229,11 +227,11 @@
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
mirror::Object* GetImageRoot(ImageRoot image_root) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
mirror::ObjectArray<mirror::Object>* GetImageRoots() const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void RelocateImage(off_t delta);
void RelocateImageMethods(off_t delta);
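The GetPointerSize() accessor moves out of line so that image.h no longer needs the ConvertToPointerSize() helper (and the utils.h header that provides it) at every include site; only image.cc gains the include. A sketch of the pattern, with a hypothetical Widget class standing in for ImageHeader:

// widget.h -- declaration only, no heavyweight includes needed here.
#include <cstdint>
class Widget {
 public:
  PointerSize GetPointerSize() const;
 private:
  uint32_t pointer_size_;
};

// widget.cc -- the out-of-line body pulls in the helper where it is used.
#include "widget.h"
#include "utils.h"  // provides ConvertToPointerSize()

PointerSize Widget::GetPointerSize() const {
  return ConvertToPointerSize(pointer_size_);
}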
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 8e49492..4f81b59 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -36,10 +36,10 @@
class MutatorLockedDumpable {
public:
explicit MutatorLockedDumpable(T& value)
- SHARED_REQUIRES(Locks::mutator_lock_) : value_(value) {
+ REQUIRES_SHARED(Locks::mutator_lock_) : value_(value) {
}
- void Dump(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Dump(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_) {
value_.Dump(os);
}
@@ -51,7 +51,7 @@
template<typename T>
std::ostream& operator<<(std::ostream& os, const MutatorLockedDumpable<T>& rhs)
-// TODO: should be SHARED_REQUIRES(Locks::mutator_lock_) however annotalysis
+// TODO: should be REQUIRES_SHARED(Locks::mutator_lock_) however annotalysis
// currently fails for this.
NO_THREAD_SAFETY_ANALYSIS {
rhs.Dump(os);
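The templated operator<< keeps its NO_THREAD_SAFETY_ANALYSIS opt-out because, as the TODO notes, annotalysis cannot prove the shared mutator-lock requirement across this call path. A minimal sketch of that escape hatch, assuming the usual attribute-based definition (DumpUnchecked is a hypothetical helper, not ART code):

#include <ostream>

// Illustrative definition; the attribute is what Clang's analysis honors.
#if defined(__clang__)
#define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))
#else
#define NO_THREAD_SAFETY_ANALYSIS
#endif

// A helper that is only ever called with the lock already held, but through
// a path the analysis cannot follow, opts out of checking entirely:
void DumpUnchecked(std::ostream& os) NO_THREAD_SAFETY_ANALYSIS {
  os << "dumped while the caller holds the lock\n";
}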
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 2d0ae63..13c6225 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -199,7 +199,7 @@
static const size_t kIRTPrevCount = kIsDebugBuild ? 7 : 3;
class IrtEntry {
public:
- void Add(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Add(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
++serial_;
if (serial_ == kIRTPrevCount) {
serial_ = 0;
@@ -227,11 +227,11 @@
class IrtIterator {
public:
- IrtIterator(IrtEntry* table, size_t i, size_t capacity) SHARED_REQUIRES(Locks::mutator_lock_)
+ IrtIterator(IrtEntry* table, size_t i, size_t capacity) REQUIRES_SHARED(Locks::mutator_lock_)
: table_(table), i_(i), capacity_(capacity) {
}
- IrtIterator& operator++() SHARED_REQUIRES(Locks::mutator_lock_) {
+ IrtIterator& operator++() REQUIRES_SHARED(Locks::mutator_lock_) {
++i_;
return *this;
}
@@ -277,7 +277,7 @@
* failed during expansion).
*/
IndirectRef Add(uint32_t cookie, mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Given an IndirectRef in the table, return the Object it refers to.
@@ -285,12 +285,12 @@
* Returns kInvalidIndirectRefObject if iref is invalid.
*/
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- mirror::Object* Get(IndirectRef iref) const SHARED_REQUIRES(Locks::mutator_lock_)
+ mirror::Object* Get(IndirectRef iref) const REQUIRES_SHARED(Locks::mutator_lock_)
ALWAYS_INLINE;
// Synchronized get which reads a reference, acquiring a lock if necessary.
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- mirror::Object* SynchronizedGet(IndirectRef iref) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Object* SynchronizedGet(IndirectRef iref) const REQUIRES_SHARED(Locks::mutator_lock_) {
return Get<kReadBarrierOption>(iref);
}
@@ -299,7 +299,7 @@
*
* Updates an existing indirect reference to point to a new object.
*/
- void Update(IndirectRef iref, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ void Update(IndirectRef iref, mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Remove an existing entry.
@@ -314,7 +314,7 @@
void AssertEmpty();
- void Dump(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_);
+ void Dump(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Return the #of entries in the entire table. This includes holes, and
@@ -334,7 +334,7 @@
}
void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t GetSegmentState() const {
return segment_state_.all;
@@ -352,7 +352,7 @@
}
// Release pages past the end of the table that may have previously held references.
- void Trim() SHARED_REQUIRES(Locks::mutator_lock_);
+ void Trim() REQUIRES_SHARED(Locks::mutator_lock_);
private:
// Extract the table index from an indirect reference.
diff --git a/runtime/indirect_reference_table_test.cc b/runtime/indirect_reference_table_test.cc
index f376ec0..61bcadd 100644
--- a/runtime/indirect_reference_table_test.cc
+++ b/runtime/indirect_reference_table_test.cc
@@ -26,7 +26,7 @@
class IndirectReferenceTableTest : public CommonRuntimeTest {};
static void CheckDump(IndirectReferenceTable* irt, size_t num_objects, size_t num_unique)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostringstream oss;
irt->Dump(oss);
if (num_objects == 0) {
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 4a86e36..388561b 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -101,12 +101,12 @@
}
static void UpdateEntrypoints(ArtMethod* method, const void* quick_code)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
method->SetEntryPointFromQuickCompiledCode(quick_code);
}
bool Instrumentation::NeedDebugVersionForBootImageCode(ArtMethod* method, const void* code) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return Dbg::IsDebuggerActive() &&
Runtime::Current()->GetHeap()->IsInBootImageOatFile(code) &&
!method->IsNative() &&
@@ -169,7 +169,7 @@
// Since we may already have done this previously, we need to push new instrumentation frame before
// existing instrumentation frames.
static void InstrumentationInstallStack(Thread* thread, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
struct InstallStackVisitor FINAL : public StackVisitor {
InstallStackVisitor(Thread* thread_in, Context* context, uintptr_t instrumentation_exit_pc)
: StackVisitor(thread_in, context, kInstrumentationStackWalk),
@@ -179,7 +179,7 @@
last_return_pc_(0) {
}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m == nullptr) {
if (kVerboseInstrumentation) {
@@ -329,7 +329,7 @@
instrumentation_stack_(thread_in->GetInstrumentationStack()),
frames_removed_(0) {}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (instrumentation_stack_->size() == 0) {
return false; // Stop.
}
@@ -1019,7 +1019,7 @@
static void CheckStackDepth(Thread* self, const InstrumentationStackFrame& instrumentation_frame,
int delta)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
size_t frame_id = StackVisitor::ComputeNumFrames(self, kInstrumentationStackWalk) + delta;
if (frame_id != instrumentation_frame.frame_id_) {
LOG(ERROR) << "Expected frame_id=" << frame_id << " but found "
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 757be8e..1e5fcf2 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -64,24 +64,24 @@
// Call-back for when a method is entered.
virtual void MethodEntered(Thread* thread, mirror::Object* this_object,
ArtMethod* method,
- uint32_t dex_pc) SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Call-back for when a method is exited.
virtual void MethodExited(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc,
const JValue& return_value)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Call-back for when a method is popped due to an exception throw. A method will either cause a
// MethodExited call-back or a MethodUnwind call-back when its activation is removed.
virtual void MethodUnwind(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Call-back for when the dex pc moves in a method.
virtual void DexPcMoved(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t new_dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Call-back for when we read from a field.
virtual void FieldRead(Thread* thread, mirror::Object* this_object, ArtMethod* method,
@@ -93,14 +93,14 @@
// Call-back when an exception is caught.
virtual void ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Call-back for when we execute a branch.
virtual void Branch(Thread* thread,
ArtMethod* method,
uint32_t dex_pc,
int32_t dex_pc_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Call-back for when we get an invokevirtual or an invokeinterface.
virtual void InvokeVirtualOrInterface(Thread* thread,
@@ -109,7 +109,7 @@
uint32_t dex_pc,
ArtMethod* callee)
REQUIRES(Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
// Instrumentation is a catch-all for when extra information is required from the runtime. The
@@ -161,7 +161,7 @@
bool AreAllMethodsDeoptimized() const {
return interpreter_stubs_installed_;
}
- bool ShouldNotifyMethodEnterExitEvents() const SHARED_REQUIRES(Locks::mutator_lock_);
+ bool ShouldNotifyMethodEnterExitEvents() const REQUIRES_SHARED(Locks::mutator_lock_);
// Executes everything with interpreter.
void DeoptimizeEverything(const char* key)
@@ -192,7 +192,7 @@
// Indicates whether the method has been deoptimized so it is executed with the interpreter.
bool IsDeoptimized(ArtMethod* method)
- REQUIRES(!deoptimized_methods_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!deoptimized_methods_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
// Enable method tracing by installing instrumentation entry/exit stubs or interpreter.
void EnableMethodTracing(const char* key,
@@ -210,7 +210,7 @@
!deoptimized_methods_lock_);
InterpreterHandlerTable GetInterpreterHandlerTable() const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return interpreter_handler_table_;
}
@@ -226,17 +226,17 @@
// Update the code of a method respecting any installed stubs.
void UpdateMethodsCode(ArtMethod* method, const void* quick_code)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
// Update the code of a method respecting any installed stubs from debugger.
void UpdateMethodsCodeFromDebugger(ArtMethod* method, const void* quick_code)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
// Get the quick code for the given method. More efficient than asking the class linker as it
// will short-cut to GetCode if instrumentation and static method resolution stubs aren't
// installed.
const void* GetQuickCodeFor(ArtMethod* method, PointerSize pointer_size) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ForceInterpretOnly() {
interpret_only_ = true;
@@ -255,49 +255,49 @@
// Code is in boot image oat file which isn't compiled as debuggable.
// Need debug version (interpreter or jitted) if that's the case.
bool NeedDebugVersionForBootImageCode(ArtMethod* method, const void* code) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool AreExitStubsInstalled() const {
return instrumentation_stubs_installed_;
}
- bool HasMethodEntryListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool HasMethodEntryListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
return have_method_entry_listeners_;
}
- bool HasMethodExitListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool HasMethodExitListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
return have_method_exit_listeners_;
}
- bool HasMethodUnwindListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool HasMethodUnwindListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
return have_method_unwind_listeners_;
}
- bool HasDexPcListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool HasDexPcListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
return have_dex_pc_listeners_;
}
- bool HasFieldReadListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool HasFieldReadListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
return have_field_read_listeners_;
}
- bool HasFieldWriteListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool HasFieldWriteListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
return have_field_write_listeners_;
}
- bool HasExceptionCaughtListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool HasExceptionCaughtListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
return have_exception_caught_listeners_;
}
- bool HasBranchListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool HasBranchListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
return have_branch_listeners_;
}
- bool HasInvokeVirtualOrInterfaceListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool HasInvokeVirtualOrInterfaceListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
return have_invoke_virtual_or_interface_listeners_;
}
- bool IsActive() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsActive() const REQUIRES_SHARED(Locks::mutator_lock_) {
return have_dex_pc_listeners_ || have_method_entry_listeners_ || have_method_exit_listeners_ ||
have_field_read_listeners_ || have_field_write_listeners_ ||
have_exception_caught_listeners_ || have_method_unwind_listeners_ ||
@@ -305,7 +305,7 @@
}
// Any instrumentation *other* than what is needed for Jit profiling active?
- bool NonJitProfilingActive() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool NonJitProfilingActive() const REQUIRES_SHARED(Locks::mutator_lock_) {
return have_dex_pc_listeners_ || have_method_exit_listeners_ ||
have_field_read_listeners_ || have_field_write_listeners_ ||
have_exception_caught_listeners_ || have_method_unwind_listeners_ ||
@@ -316,7 +316,7 @@
// listeners into executing code and get method enter events for methods already on the stack.
void MethodEnterEvent(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(HasMethodEntryListeners())) {
MethodEnterEventImpl(thread, this_object, method, dex_pc);
}
@@ -326,7 +326,7 @@
void MethodExitEvent(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc,
const JValue& return_value) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(HasMethodExitListeners())) {
MethodExitEventImpl(thread, this_object, method, dex_pc, return_value);
}
@@ -335,12 +335,12 @@
// Inform listeners that a method has been exited due to an exception.
void MethodUnwindEvent(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Inform listeners that the dex pc has moved (only supported by the interpreter).
void DexPcMovedEvent(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(HasDexPcListeners())) {
DexPcMovedEventImpl(thread, this_object, method, dex_pc);
}
@@ -348,7 +348,7 @@
// Inform listeners that a branch has been taken (only supported by the interpreter).
void Branch(Thread* thread, ArtMethod* method, uint32_t dex_pc, int32_t offset) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(HasBranchListeners())) {
BranchImpl(thread, method, dex_pc, offset);
}
@@ -358,7 +358,7 @@
void FieldReadEvent(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc,
ArtField* field) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(HasFieldReadListeners())) {
FieldReadEventImpl(thread, this_object, method, dex_pc, field);
}
@@ -368,7 +368,7 @@
void FieldWriteEvent(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc,
ArtField* field, const JValue& field_value) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(HasFieldWriteListeners())) {
FieldWriteEventImpl(thread, this_object, method, dex_pc, field, field_value);
}
@@ -379,7 +379,7 @@
ArtMethod* caller,
uint32_t dex_pc,
ArtMethod* callee) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(HasInvokeVirtualOrInterfaceListeners())) {
InvokeVirtualOrInterfaceImpl(thread, this_object, caller, dex_pc, callee);
}
@@ -387,48 +387,48 @@
// Inform listeners that an exception was caught.
void ExceptionCaughtEvent(Thread* thread, mirror::Throwable* exception_object) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Called when an instrumented method is entered. The intended link register (lr) is saved so
// that returning causes a branch to the method exit stub. Generates method enter events.
void PushInstrumentationStackFrame(Thread* self, mirror::Object* this_object,
ArtMethod* method, uintptr_t lr,
bool interpreter_entry)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Called when an instrumented method is exited. Removes the pushed instrumentation frame
// returning the intended link register. Generates method exit events.
TwoWordReturn PopInstrumentationStackFrame(Thread* self, uintptr_t* return_pc,
uint64_t gpr_result, uint64_t fpr_result)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
// Pops an instrumentation frame from the current thread and generates an unwind event.
// Returns the return pc for the instrumentation frame that's popped.
uintptr_t PopMethodForUnwind(Thread* self, bool is_deoptimization) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Call-back for configuring stubs.
- void InstallStubsForClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_)
+ void InstallStubsForClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!deoptimized_methods_lock_);
void InstallStubsForMethod(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
// Install instrumentation exit stub on every method of the stack of the given thread.
// This is used by the debugger to cause a deoptimization of the thread's stack after updating
// local variable(s).
void InstrumentThreadStack(Thread* thread)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::thread_list_lock_);
static size_t ComputeFrameId(Thread* self,
size_t frame_depth,
size_t inlined_frames_before_frame)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Does not hold lock, used to check if someone changed from not instrumented to instrumented
// during a GC suspend point.
- bool AllocEntrypointsInstrumented() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool AllocEntrypointsInstrumented() const REQUIRES_SHARED(Locks::mutator_lock_) {
return alloc_entrypoints_instrumented_;
}
@@ -463,44 +463,44 @@
void MethodEnterEventImpl(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void MethodExitEventImpl(Thread* thread, mirror::Object* this_object,
ArtMethod* method,
uint32_t dex_pc, const JValue& return_value) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void DexPcMovedEventImpl(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void BranchImpl(Thread* thread, ArtMethod* method, uint32_t dex_pc, int32_t offset) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void InvokeVirtualOrInterfaceImpl(Thread* thread,
mirror::Object* this_object,
ArtMethod* caller,
uint32_t dex_pc,
ArtMethod* callee) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void FieldReadEventImpl(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc,
ArtField* field) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void FieldWriteEventImpl(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc,
ArtField* field, const JValue& field_value) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Read barrier-aware utility functions for accessing deoptimized_methods_
bool AddDeoptimizedMethod(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(deoptimized_methods_lock_);
bool IsDeoptimizedMethod(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_, deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_, deoptimized_methods_lock_);
bool RemoveDeoptimizedMethod(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(deoptimized_methods_lock_);
ArtMethod* BeginDeoptimizedMethod()
- SHARED_REQUIRES(Locks::mutator_lock_, deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_, deoptimized_methods_lock_);
bool IsDeoptimizedMethodsEmpty() const
- SHARED_REQUIRES(Locks::mutator_lock_, deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_, deoptimized_methods_lock_);
void UpdateMethodsCodeImpl(ArtMethod* method, const void* quick_code)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
// Have we hijacked ArtMethod::code_ so that it calls instrumentation/interpreter code?
@@ -610,7 +610,7 @@
interpreter_entry_(interpreter_entry) {
}
- std::string Dump() const SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* this_object_;
ArtMethod* method_;
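The event hooks above share one shape: an inline check of a cached boolean (wrapped in UNLIKELY) guards a call into an out-of-line *Impl method, so the common case with no listeners installed costs a single load and a predictable branch. A reduced sketch of the pattern, with a hypothetical Tracer class standing in for Instrumentation:

#include <cstdint>

class Tracer {
 public:
  // Inline fast path: nearly free when no listener is registered.
  void MethodEnterEvent(uint32_t dex_pc) const {
    if (__builtin_expect(have_entry_listeners_, false)) {  // i.e. UNLIKELY(...)
      MethodEnterEventImpl(dex_pc);  // out-of-line slow path
    }
  }

 private:
  void MethodEnterEventImpl(uint32_t dex_pc) const {
    // Notify every registered listener (elided in this sketch).
    (void)dex_pc;
  }
  bool have_entry_listeners_ = false;
};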
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index 684c471..abe3184 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -47,7 +47,7 @@
mirror::Object* this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
received_method_enter_event = true;
}
@@ -56,7 +56,7 @@
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
const JValue& return_value ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
received_method_exit_event = true;
}
@@ -64,7 +64,7 @@
mirror::Object* this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
received_method_unwind_event = true;
}
@@ -72,7 +72,7 @@
mirror::Object* this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t new_dex_pc ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
received_dex_pc_moved_event = true;
}
@@ -81,7 +81,7 @@
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
ArtField* field ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
received_field_read_event = true;
}
@@ -91,13 +91,13 @@
uint32_t dex_pc ATTRIBUTE_UNUSED,
ArtField* field ATTRIBUTE_UNUSED,
const JValue& field_value ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
received_field_written_event = true;
}
void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED,
mirror::Throwable* exception_object ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
received_exception_caught_event = true;
}
@@ -105,7 +105,7 @@
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
int32_t dex_pc_offset ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
received_branch_event = true;
}
@@ -114,7 +114,7 @@
ArtMethod* caller ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
ArtMethod* callee ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
received_invoke_virtual_or_interface_event = true;
}
@@ -205,7 +205,7 @@
}
void DeoptimizeMethod(Thread* self, ArtMethod* method, bool enable_deoptimization)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
ScopedThreadSuspension sts(self, kSuspended);
@@ -221,7 +221,7 @@
void UndeoptimizeMethod(Thread* self, ArtMethod* method,
const char* key, bool disable_deoptimization)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
ScopedThreadSuspension sts(self, kSuspended);
@@ -236,7 +236,7 @@
}
void DeoptimizeEverything(Thread* self, const char* key, bool enable_deoptimization)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
ScopedThreadSuspension sts(self, kSuspended);
@@ -251,7 +251,7 @@
}
void UndeoptimizeEverything(Thread* self, const char* key, bool disable_deoptimization)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
ScopedThreadSuspension sts(self, kSuspended);
@@ -266,7 +266,7 @@
}
void EnableMethodTracing(Thread* self, const char* key, bool needs_interpreter)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
ScopedThreadSuspension sts(self, kSuspended);
@@ -278,7 +278,7 @@
}
void DisableMethodTracing(Thread* self, const char* key)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
ScopedThreadSuspension sts(self, kSuspended);
@@ -291,7 +291,7 @@
private:
static bool HasEventListener(const instrumentation::Instrumentation* instr, uint32_t event_type)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
switch (event_type) {
case instrumentation::Instrumentation::kMethodEntered:
return instr->HasMethodEntryListeners();
@@ -320,7 +320,7 @@
static void ReportEvent(const instrumentation::Instrumentation* instr, uint32_t event_type,
Thread* self, ArtMethod* method, mirror::Object* obj,
uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
switch (event_type) {
case instrumentation::Instrumentation::kMethodEntered:
instr->MethodEnterEvent(self, obj, method, dex_pc);
@@ -588,7 +588,7 @@
do { \
Instrumentation* const instr = Runtime::Current()->GetInstrumentation(); \
bool interpreter = \
- (_level == Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter); \
+ ((_level) == Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter); \
EXPECT_EQ(_level, GetCurrentInstrumentationLevel()); \
EXPECT_EQ(_user_count, GetInstrumentationUserCount()); \
if (instr->IsForcedInterpretOnly()) { \
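Besides the rename, the only functional edit in this file wraps the macro parameter in parentheses. Without them, an argument containing a lower-precedence operator would bind incorrectly after expansion; a generic illustration (IS_ZERO_* are made-up macros for this example):

#define IS_ZERO_BAD(x)  (x == 0)       // parameter used unparenthesized
#define IS_ZERO_GOOD(x) ((x) == 0)

// IS_ZERO_BAD(a & mask)  expands to (a & mask == 0), i.e. (a & (mask == 0)),
// because == binds tighter than &.
// IS_ZERO_GOOD(a & mask) expands to ((a & mask) == 0), the intended test.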
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index f845de5..184fbdc 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -58,44 +58,44 @@
// Interns a potentially new string in the 'strong' table. May cause thread suspension.
mirror::String* InternStrong(int32_t utf16_length, const char* utf8_data)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// Only used by image writer. Special version that may not cause thread suspension since the GC
// cannot be running while we are doing image writing. May be called while holding a
// lock since there will not be thread suspension.
mirror::String* InternStrongImageString(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Interns a potentially new string in the 'strong' table. May cause thread suspension.
- mirror::String* InternStrong(const char* utf8_data) SHARED_REQUIRES(Locks::mutator_lock_)
+ mirror::String* InternStrong(const char* utf8_data) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
// Interns a potentially new string in the 'strong' table. May cause thread suspension.
- mirror::String* InternStrong(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_)
+ mirror::String* InternStrong(mirror::String* s) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
// Interns a potentially new string in the 'weak' table. May cause thread suspension.
- mirror::String* InternWeak(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_)
+ mirror::String* InternWeak(mirror::String* s) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
- void SweepInternTableWeaks(IsMarkedVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_)
+ void SweepInternTableWeaks(IsMarkedVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::intern_table_lock_);
- bool ContainsWeak(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_)
+ bool ContainsWeak(mirror::String* s) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::intern_table_lock_);
// Lookup a strong intern, returns null if not found.
mirror::String* LookupStrong(Thread* self, mirror::String* s)
REQUIRES(!Locks::intern_table_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::String* LookupStrong(Thread* self, uint32_t utf16_length, const char* utf8_data)
REQUIRES(!Locks::intern_table_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Lookup a weak intern, returns null if not found.
mirror::String* LookupWeak(Thread* self, mirror::String* s)
REQUIRES(!Locks::intern_table_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Total number of interned strings.
size_t Size() const REQUIRES(!Locks::intern_table_lock_);
@@ -107,31 +107,31 @@
size_t WeakSize() const REQUIRES(!Locks::intern_table_lock_);
void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
void DumpForSigQuit(std::ostream& os) const REQUIRES(!Locks::intern_table_lock_);
- void BroadcastForNewInterns() SHARED_REQUIRES(Locks::mutator_lock_);
+ void BroadcastForNewInterns() REQUIRES_SHARED(Locks::mutator_lock_);
// Adds all of the resolved image strings from the image spaces into the intern table. The
// advantage of doing this is preventing expensive DexFile::FindStringId calls. Sets
// images_added_to_intern_table_ to true.
void AddImagesStringsToTable(const std::vector<gc::space::ImageSpace*>& image_spaces)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
// Add a new intern table for inserting to, previous intern tables are still there but no
// longer inserted into and ideally unmodified. This is done to prevent dirty pages.
void AddNewTable()
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
// Read the intern table from memory. The elements aren't copied, the intern hash set data will
// point to somewhere within ptr. Only reads the strong interns.
size_t AddTableFromMemory(const uint8_t* ptr) REQUIRES(!Locks::intern_table_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Write the post zygote intern table to a pointer. Only writes the strong interns since it is
// expected that there are no weak interns since this is called from the image writer.
- size_t WriteToMemory(uint8_t* ptr) SHARED_REQUIRES(Locks::mutator_lock_)
+ size_t WriteToMemory(uint8_t* ptr) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::intern_table_lock_);
// Change the weak root state. May broadcast to waiters.
@@ -181,18 +181,18 @@
class Table {
public:
Table();
- mirror::String* Find(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_)
+ mirror::String* Find(mirror::String* s) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::intern_table_lock_);
- mirror::String* Find(const Utf8String& string) SHARED_REQUIRES(Locks::mutator_lock_)
+ mirror::String* Find(const Utf8String& string) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::intern_table_lock_);
- void Insert(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_)
+ void Insert(mirror::String* s) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::intern_table_lock_);
void Remove(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
void VisitRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
void SweepWeaks(IsMarkedVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
// Add a new intern table that will only be inserted into from now on.
void AddNewTable() REQUIRES(Locks::intern_table_lock_);
size_t Size() const REQUIRES(Locks::intern_table_lock_);
@@ -200,18 +200,18 @@
// Tables read are inserted at the front of the table array. Only checks for conflicts in
// debug builds. Returns how many bytes were read.
size_t AddTableFromMemory(const uint8_t* ptr)
- REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
// Write the intern tables to ptr, if there are multiple tables they are combined into a single
// one. Returns how many bytes were written.
size_t WriteToMemory(uint8_t* ptr)
- REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
private:
typedef HashSet<GcRoot<mirror::String>, GcRootEmptyFn, StringHashEquals, StringHashEquals,
TrackingAllocator<GcRoot<mirror::String>, kAllocatorTagInternTable>> UnorderedSet;
void SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
// We call AddNewTable when we create the zygote to reduce private dirty pages caused by
// modifying the zygote intern table. The back of the table is modified when strings are interned.
@@ -222,35 +222,35 @@
// If holding_locks is true, then we may also hold other locks. If holding_locks is true, then we
// require GC is not running since it is not safe to wait while holding locks.
mirror::String* Insert(mirror::String* s, bool is_strong, bool holding_locks)
- REQUIRES(!Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
mirror::String* LookupStrongLocked(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
mirror::String* LookupWeakLocked(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
mirror::String* InsertStrong(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
mirror::String* InsertWeak(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
void RemoveStrong(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
void RemoveWeak(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
// Transaction rollback access.
mirror::String* LookupStringFromImage(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
mirror::String* InsertStrongFromTransaction(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
mirror::String* InsertWeakFromTransaction(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
void RemoveStrongFromTransaction(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
void RemoveWeakFromTransaction(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
size_t AddTableFromMemoryLocked(const uint8_t* ptr)
- REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
// Change the weak root state. May broadcast to waiters.
void ChangeWeakRootStateLocked(gc::WeakRootState new_state)
@@ -258,7 +258,7 @@
// Wait until we can read weak roots.
void WaitUntilAccessible(Thread* self)
- REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
bool images_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_);
bool log_new_roots_ GUARDED_BY(Locks::intern_table_lock_);
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index fe78bf2..620e15b 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -64,7 +64,7 @@
class TestPredicate : public IsMarkedVisitor {
public:
- mirror::Object* IsMarked(mirror::Object* s) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Object* IsMarked(mirror::Object* s) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
bool erased = false;
for (auto it = expected_.begin(), end = expected_.end(); it != end; ++it) {
if (*it == s) {
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 101c9a1..0003e72 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -20,7 +20,6 @@
#include "common_throws.h"
#include "interpreter_common.h"
-#include "interpreter_goto_table_impl.h"
#include "interpreter_mterp_impl.h"
#include "interpreter_switch_impl.h"
#include "mirror/string-inl.h"
@@ -37,7 +36,7 @@
static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& shorty,
Object* receiver, uint32_t* args, JValue* result)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// TODO: The following enters JNI code using a typedef-ed function rather than the JNI compiler,
// it should be removed and JNI compiled stubs used instead.
ScopedObjectAccessUnchecked soa(self);
@@ -231,15 +230,12 @@
enum InterpreterImplKind {
kSwitchImplKind, // Switch-based interpreter implementation.
- kComputedGotoImplKind, // Computed-goto-based interpreter implementation.
kMterpImplKind // Assembly interpreter
};
static std::ostream& operator<<(std::ostream& os, const InterpreterImplKind& rhs) {
os << ((rhs == kSwitchImplKind)
? "Switch-based interpreter"
- : (rhs == kComputedGotoImplKind)
- ? "Computed-goto-based interpreter"
- : "Asm interpreter");
+ : "Asm interpreter");
return os;
}
@@ -250,7 +246,7 @@
const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame,
JValue result_register,
- bool stay_in_interpreter = false) SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool stay_in_interpreter = false) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!shadow_frame.GetMethod()->IsAbstract());
DCHECK(!shadow_frame.GetMethod()->IsNative());
if (LIKELY(shadow_frame.GetDexPC() == 0)) { // Entering the method, but not via deoptimization.
@@ -323,7 +319,8 @@
}
}
}
- } else if (kInterpreterImplKind == kSwitchImplKind) {
+ } else {
+ DCHECK_EQ(kInterpreterImplKind, kSwitchImplKind);
if (transaction_active) {
return ExecuteSwitchImpl<false, true>(self, code_item, shadow_frame, result_register,
false);
@@ -331,13 +328,6 @@
return ExecuteSwitchImpl<false, false>(self, code_item, shadow_frame, result_register,
false);
}
- } else {
- DCHECK_EQ(kInterpreterImplKind, kComputedGotoImplKind);
- if (transaction_active) {
- return ExecuteGotoImpl<false, true>(self, code_item, shadow_frame, result_register);
- } else {
- return ExecuteGotoImpl<false, false>(self, code_item, shadow_frame, result_register);
- }
}
} else {
// Enter the "with access check" interpreter.
@@ -350,7 +340,8 @@
return ExecuteSwitchImpl<true, false>(self, code_item, shadow_frame, result_register,
false);
}
- } else if (kInterpreterImplKind == kSwitchImplKind) {
+ } else {
+ DCHECK_EQ(kInterpreterImplKind, kSwitchImplKind);
if (transaction_active) {
return ExecuteSwitchImpl<true, true>(self, code_item, shadow_frame, result_register,
false);
@@ -358,13 +349,6 @@
return ExecuteSwitchImpl<true, false>(self, code_item, shadow_frame, result_register,
false);
}
- } else {
- DCHECK_EQ(kInterpreterImplKind, kComputedGotoImplKind);
- if (transaction_active) {
- return ExecuteGotoImpl<true, true>(self, code_item, shadow_frame, result_register);
- } else {
- return ExecuteGotoImpl<true, false>(self, code_item, shadow_frame, result_register);
- }
}
}
}
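These hunks drop the computed-goto interpreter as a selectable implementation, leaving only the switch-based and assembly (mterp) interpreters; the deleted interpreter_goto_table_impl.cc below notes that Clang compiles the goto table very slowly. For context, a toy comparison of the two dispatch styles (RunSwitch/RunGoto are illustrative names; real opcode handlers are far more involved):

#include <cstddef>

// Switch-based dispatch: portable, one central branch per instruction.
int RunSwitch(const unsigned char* code) {
  int acc = 0;
  for (size_t pc = 0;; ++pc) {
    switch (code[pc]) {
      case 0: return acc;          // HALT
      case 1: ++acc; break;        // INC
      case 2: acc += acc; break;   // DOUBLE
    }
  }
}

// Computed-goto dispatch (GCC/Clang labels-as-values extension): each handler
// jumps straight to the next handler, avoiding the central switch.
int RunGoto(const unsigned char* code) {
  static void* const handlers[] = {&&op_halt, &&op_inc, &&op_double};
  int acc = 0;
  size_t pc = 0;
  goto *handlers[code[pc]];
 op_halt:   return acc;
 op_inc:    ++acc;      ++pc; goto *handlers[code[pc]];
 op_double: acc += acc; ++pc; goto *handlers[code[pc]];
}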
@@ -466,7 +450,7 @@
}
static bool IsStringInit(const Instruction* instr, ArtMethod* caller)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (instr->Opcode() == Instruction::INVOKE_DIRECT ||
instr->Opcode() == Instruction::INVOKE_DIRECT_RANGE) {
// Instead of calling ResolveMethod() which has suspend point and can trigger
@@ -499,7 +483,7 @@
ShadowFrame* shadow_frame,
bool from_code,
JValue* ret_val)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue value;
// Set value to last known result in case the shadow frame chain is empty.
value.SetJ(ret_val->GetJ());
diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h
index bf4bcff..38ce851 100644
--- a/runtime/interpreter/interpreter.h
+++ b/runtime/interpreter/interpreter.h
@@ -38,20 +38,20 @@
extern void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method,
mirror::Object* receiver, uint32_t* args, JValue* result,
bool stay_in_interpreter = false)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// 'from_code' denotes whether the deoptimization was explicitly triggered by compiled code.
extern void EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame* shadow_frame, bool from_code,
JValue* ret_val)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
extern JValue EnterInterpreterFromEntryPoint(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ArtInterpreterToInterpreterBridge(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame, JValue* result)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// One-time sanity check.
void CheckInterpreterAsmConstants();
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index ac146b3..77c3f0f 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -196,7 +196,7 @@
template<Primitive::Type field_type>
static JValue GetFieldValue(const ShadowFrame& shadow_frame, uint32_t vreg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue field_value;
switch (field_type) {
case Primitive::kPrimBoolean:
@@ -456,7 +456,7 @@
// Assign register 'src_reg' from shadow_frame to register 'dest_reg' into new_shadow_frame.
static inline void AssignRegister(ShadowFrame* new_shadow_frame, const ShadowFrame& shadow_frame,
size_t dest_reg, size_t src_reg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Uint required, so that sign extension does not make this wrong on 64b systems
uint32_t src_value = shadow_frame.GetVReg(src_reg);
mirror::Object* o = shadow_frame.GetVRegReference<kVerifyNone>(src_reg);
@@ -491,7 +491,7 @@
template <bool is_range,
bool do_assignability_check,
size_t kVarArgMax>
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
static inline bool DoCallCommon(ArtMethod* called_method,
Thread* self,
ShadowFrame& shadow_frame,
@@ -505,7 +505,7 @@
const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame,
JValue* result)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = shadow_frame->GetMethod();
// Ensure static methods are initialized.
if (method->IsStatic()) {
@@ -541,7 +541,7 @@
void SetStringInitValueToAllAliases(ShadowFrame* shadow_frame,
uint16_t this_obj_vreg,
JValue result)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Object* existing = shadow_frame->GetVRegReference(this_obj_vreg);
if (existing == nullptr) {
// If it's null, we come from compiled code that was deoptimized. Nothing to do,
@@ -854,7 +854,7 @@
return true;
}
-// TODO fix thread analysis: should be SHARED_REQUIRES(Locks::mutator_lock_).
+// TODO fix thread analysis: should be REQUIRES_SHARED(Locks::mutator_lock_).
template<typename T>
static void RecordArrayElementsInTransactionImpl(mirror::PrimitiveArray<T>* array, int32_t count)
NO_THREAD_SAFETY_ANALYSIS {
@@ -865,7 +865,7 @@
}
void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(Runtime::Current()->IsActiveTransaction());
DCHECK(array != nullptr);
DCHECK_LE(count, array->GetLength());
@@ -904,7 +904,7 @@
// Explicit DoCall template function declarations.
#define EXPLICIT_DO_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
+ template REQUIRES_SHARED(Locks::mutator_lock_) \
bool DoCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self, \
ShadowFrame& shadow_frame, \
const Instruction* inst, uint16_t inst_data, \
@@ -917,7 +917,7 @@
// Explicit DoFilledNewArray template function declarations.
#define EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(_is_range_, _check, _transaction_active) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
+ template REQUIRES_SHARED(Locks::mutator_lock_) \
bool DoFilledNewArray<_is_range_, _check, _transaction_active>(const Instruction* inst, \
const ShadowFrame& shadow_frame, \
Thread* self, JValue* result)
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 7b38473..9d76685 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -66,7 +66,7 @@
namespace interpreter {
void ThrowNullPointerExceptionFromInterpreter()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <bool kMonitorCounting>
static inline void DoMonitorEnter(Thread* self,
@@ -108,13 +108,13 @@
void AbortTransactionF(Thread* self, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void AbortTransactionV(Thread* self, const char* fmt, va_list args)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Invokes the given method. This is part of the invocation support and is used by DoInvoke and
// DoInvokeVirtualQuick functions.
@@ -213,32 +213,32 @@
// Returns true on success, otherwise throws an exception and returns false.
template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
- uint16_t inst_data) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint16_t inst_data) REQUIRES_SHARED(Locks::mutator_lock_);
// Handles iget-quick, iget-wide-quick and iget-object-quick instructions.
// Returns true on success, otherwise throws an exception and returns false.
template<Primitive::Type field_type>
bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Handles iput-XXX and sput-XXX instructions.
// Returns true on success, otherwise throws an exception and returns false.
template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check,
bool transaction_active>
bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction* inst,
- uint16_t inst_data) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint16_t inst_data) REQUIRES_SHARED(Locks::mutator_lock_);
// Handles iput-quick, iput-wide-quick and iput-object-quick instructions.
// Returns true on success, otherwise throws an exception and returns false.
template<Primitive::Type field_type, bool transaction_active>
bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Handles string resolution for const-string and const-string-jumbo instructions. Also ensures the
// java.lang.String class is initialized.
static inline String* ResolveString(Thread* self, ShadowFrame& shadow_frame, uint32_t string_idx)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Class* java_lang_string_class = String::GetJavaLangString();
if (UNLIKELY(!java_lang_string_class->IsInitialized())) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -272,7 +272,7 @@
// Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false.
static inline bool DoIntDivide(ShadowFrame& shadow_frame, size_t result_reg,
int32_t dividend, int32_t divisor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min();
if (UNLIKELY(divisor == 0)) {
ThrowArithmeticExceptionDivideByZero();
@@ -290,7 +290,7 @@
// Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false.
static inline bool DoIntRemainder(ShadowFrame& shadow_frame, size_t result_reg,
int32_t dividend, int32_t divisor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min();
if (UNLIKELY(divisor == 0)) {
ThrowArithmeticExceptionDivideByZero();
@@ -308,7 +308,7 @@
// Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false.
static inline bool DoLongDivide(ShadowFrame& shadow_frame, size_t result_reg,
int64_t dividend, int64_t divisor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const int64_t kMinLong = std::numeric_limits<int64_t>::min();
if (UNLIKELY(divisor == 0)) {
ThrowArithmeticExceptionDivideByZero();
@@ -326,7 +326,7 @@
// Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false.
static inline bool DoLongRemainder(ShadowFrame& shadow_frame, size_t result_reg,
int64_t dividend, int64_t divisor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const int64_t kMinLong = std::numeric_limits<int64_t>::min();
if (UNLIKELY(divisor == 0)) {
ThrowArithmeticExceptionDivideByZero();
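The divide and remainder helpers all guard the same two cases before dividing: a zero divisor (which throws ArithmeticException) and the most-negative value divided by -1, which Java defines to yield the most-negative value again because the true result does not fit; the raw C++ division would be undefined behavior. A reduced sketch of the guard, assuming those standard Java semantics (SafeIntDivide is a hypothetical helper):

#include <cstdint>
#include <limits>

// Returns false for division by zero (caller throws); otherwise writes the
// Java-semantics quotient, including the INT32_MIN / -1 overflow case.
bool SafeIntDivide(int32_t dividend, int32_t divisor, int32_t* result) {
  constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min();
  if (divisor == 0) {
    return false;
  }
  if (dividend == kMinInt && divisor == -1) {
    *result = kMinInt;  // 2^31 is not representable; Java wraps to kMinInt.
  } else {
    *result = dividend / divisor;
  }
  return true;
}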
@@ -350,7 +350,7 @@
// Returns the branch offset to the next instruction to execute.
static inline int32_t DoPackedSwitch(const Instruction* inst, const ShadowFrame& shadow_frame,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(inst->Opcode() == Instruction::PACKED_SWITCH);
const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data));
@@ -378,7 +378,7 @@
// Returns the branch offset to the next instruction to execute.
static inline int32_t DoSparseSwitch(const Instruction* inst, const ShadowFrame& shadow_frame,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(inst->Opcode() == Instruction::SPARSE_SWITCH);
const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data));
@@ -411,18 +411,18 @@
uint32_t FindNextInstructionFollowingException(Thread* self, ShadowFrame& shadow_frame,
uint32_t dex_pc, const instrumentation::Instrumentation* instrumentation)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
NO_RETURN void UnexpectedOpcode(const Instruction* inst, const ShadowFrame& shadow_frame)
__attribute__((cold))
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
 // Set to true if you want TraceExecution invocation before each bytecode execution.
constexpr bool kTraceExecutionEnabled = false;
static inline void TraceExecution(const ShadowFrame& shadow_frame, const Instruction* inst,
const uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kTraceExecutionEnabled) {
#define TRACE_LOG std::cerr
std::ostringstream oss;
@@ -465,7 +465,7 @@
// Explicitly instantiate all DoInvoke functions.
#define EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, _is_range, _do_check) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
+ template REQUIRES_SHARED(Locks::mutator_lock_) \
bool DoInvoke<_type, _is_range, _do_check>(Thread* self, ShadowFrame& shadow_frame, \
const Instruction* inst, uint16_t inst_data, \
JValue* result)
@@ -486,7 +486,7 @@
// Explicitly instantiate all DoInvokeVirtualQuick functions.
#define EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(_is_range) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
+ template REQUIRES_SHARED(Locks::mutator_lock_) \
bool DoInvokeVirtualQuick<_is_range>(Thread* self, ShadowFrame& shadow_frame, \
const Instruction* inst, uint16_t inst_data, \
JValue* result)
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
deleted file mode 100644
index 37dd63b..0000000
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ /dev/null
@@ -1,2607 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "interpreter_goto_table_impl.h"
-
-// Common includes
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "stack.h"
-#include "thread.h"
-
-// Clang compiles the GOTO interpreter very slowly, so we skip it. The following implementation
-// details are only necessary when compiling it.
-#if !defined(__clang__)
-#include "experimental_flags.h"
-#include "interpreter_common.h"
-#include "jit/jit.h"
-#include "safe_math.h"
-#endif
-
-namespace art {
-namespace interpreter {
-
-#if !defined(__clang__)
-
-// In the following macros, we expect the following local variables to exist:
-// - "self": the current Thread*.
-// - "inst": the current Instruction*.
-// - "inst_data": the current instruction's first 16 bits.
-// - "dex_pc": the current pc.
-// - "shadow_frame": the current shadow frame.
-// - "currentHandlersTable": the current table of pointers to each instruction handler.
-
-// Advances to the next instruction and updates the interpreter state.
-#define ADVANCE(_offset) \
- do { \
- int32_t disp = static_cast<int32_t>(_offset); \
- inst = inst->RelativeAt(disp); \
- dex_pc = static_cast<uint32_t>(static_cast<int32_t>(dex_pc) + disp); \
- shadow_frame.SetDexPC(dex_pc); \
- TraceExecution(shadow_frame, inst, dex_pc); \
- inst_data = inst->Fetch16(0); \
- goto *currentHandlersTable[inst->Opcode(inst_data)]; \
- } while (false)
-
-#define HANDLE_PENDING_EXCEPTION() goto exception_pending_label
-
-#define POSSIBLY_HANDLE_PENDING_EXCEPTION(_is_exception_pending, _offset) \
- do { \
- if (UNLIKELY(_is_exception_pending)) { \
- HANDLE_PENDING_EXCEPTION(); \
- } else { \
- ADVANCE(_offset); \
- } \
- } while (false)
-
-#define UPDATE_HANDLER_TABLE() \
- currentHandlersTable = handlersTable[ \
- Runtime::Current()->GetInstrumentation()->GetInterpreterHandlerTable()]
-
-#define BRANCH_INSTRUMENTATION(offset) \
- do { \
- if (UNLIKELY(instrumentation->HasBranchListeners())) { \
- instrumentation->Branch(self, method, dex_pc, offset); \
- } \
- JValue result; \
- if (jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, &result)) { \
- return result; \
- } \
- } while (false)
-
-#define HOTNESS_UPDATE() \
- do { \
- if (jit != nullptr) { \
- jit->AddSamples(self, method, 1, /*with_backedges*/ true); \
- } \
- } while (false)
-
-#define UNREACHABLE_CODE_CHECK() \
- do { \
- if (kIsDebugBuild) { \
- LOG(FATAL) << "We should not be here !"; \
- UNREACHABLE(); \
- } \
- } while (false)
-
-#define HANDLE_INSTRUCTION_START(opcode) op_##opcode: // NOLINT(whitespace/labels)
-#define HANDLE_INSTRUCTION_END() UNREACHABLE_CODE_CHECK()
-
-#define HANDLE_MONITOR_CHECKS() \
- if (!DoMonitorCheckOnExit<do_assignability_check>(self, &shadow_frame)) { \
- HANDLE_PENDING_EXCEPTION(); \
- }
-
-/**
- * Interpreter based on computed goto tables.
- *
- * Each instruction is associated with a handler. This handler is responsible for executing the
- * instruction and jumping to the next instruction's handler.
- * In order to limit the cost of instrumentation, we have two handler tables:
- * - the "main" handler table: it contains handlers for the normal execution of each instruction,
- * without any instrumentation handling.
- * - the "alternative" handler table: it contains alternative handlers which first handle
- * instrumentation before jumping to the corresponding "normal" instruction's handler.
- *
- * When instrumentation is active, the interpreter uses the "alternative" handler table. Otherwise
- * it uses the "main" handler table.
- *
- * The current handler table is the handler table being used by the interpreter. It is updated:
- * - on backward branch (goto, if and switch instructions)
- * - after invoke
- * - when an exception is thrown.
- * This allows, for instance, attaching a debugger to an already running application.
- *
- * For a fast handler table update, handler tables are stored in an array of handler tables. Each
- * handler table is represented by the InterpreterHandlerTable enum, which associates it with an
- * index in this array of handler tables (see Instrumentation::GetInterpreterHandlerTable).
- *
- * Here's the current layout of this array of handler tables:
- *
- * ---------------------+---------------+
- * | NOP | (handler for NOP instruction)
- * +---------------+
- * "main" | MOVE | (handler for MOVE instruction)
- * handler table +---------------+
- * | ... |
- * +---------------+
- * | UNUSED_FF | (handler for UNUSED_FF instruction)
- * ---------------------+---------------+
- * | NOP | (alternative handler for NOP instruction)
- * +---------------+
- * "alternative" | MOVE | (alternative handler for MOVE instruction)
- * handler table +---------------+
- * | ... |
- * +---------------+
- * | UNUSED_FF | (alternative handler for UNUSED_FF instruction)
- * ---------------------+---------------+
- *
- */
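As a purely illustrative aside (this sketch is not part of the deleted file; the toy opcodes, RunSketch, and both tables are invented for the example), the dual-table dispatch described above reduces to a few dozen lines: one row of plain handlers, one row of instrumenting preludes that fall through to them, and a single pointer selecting the active row. It uses the same GCC/Clang labels-as-values extension the deleted interpreter relied on.

    // A toy two-opcode "bytecode" just for this sketch: 0 = NOP, 1 = HALT.
    // Relies on the GCC/Clang labels-as-values extension (&&label / goto *ptr).
    #include <cstdio>

    static int RunSketch(const unsigned char* bytecode, bool instrumented) {
      // Row 0: plain handlers. Row 1: handlers that run an instrumentation
      // prelude and then fall through to the plain handler, mirroring the
      // "main" / "alternative" tables described in the comment above.
      const void* const tables[2][2] = {
        { &&op_nop, &&op_halt },
        { &&alt_op_nop, &&alt_op_halt },
      };
      const void* const* current_table = tables[instrumented ? 1 : 0];
      int pc = 0;
      int executed = 0;

    #define DISPATCH() goto *current_table[bytecode[pc]]
      DISPATCH();

    alt_op_nop:
      std::printf("instrumentation prelude: NOP at pc=%d\n", pc);
      // Falls through to the plain handler, as the alternative table does.
    op_nop:
      ++executed;
      ++pc;
      DISPATCH();

    alt_op_halt:
      std::printf("instrumentation prelude: HALT at pc=%d\n", pc);
    op_halt:
      return executed;
    #undef DISPATCH
    }

    int main() {
      const unsigned char program[] = {0, 0, 0, 1};  // NOP, NOP, NOP, HALT
      std::printf("executed %d instructions\n", RunSketch(program, /*instrumented=*/ true));
      return 0;
    }

Switching current_table between the two rows is the whole instrumentation toggle; no per-instruction branch is needed, which is what the handler-table layout drawn above buys.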
-template<bool do_access_check, bool transaction_active>
-JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame,
- JValue result_register) {
- // Define handler tables:
- // - The main handler table contains execution handlers for each instruction.
- // - The alternative handler table contains prelude handlers which check for thread suspend and
- // manage instrumentation before jumping to the execution handler.
- static const void* const handlersTable[instrumentation::kNumHandlerTables][kNumPackedOpcodes] = {
- {
- // Main handler table.
-#define INSTRUCTION_HANDLER(o, code, n, f, i, a, v) &&op_##code,
-#include "dex_instruction_list.h"
- DEX_INSTRUCTION_LIST(INSTRUCTION_HANDLER)
-#undef DEX_INSTRUCTION_LIST
-#undef INSTRUCTION_HANDLER
- }, {
- // Alternative handler table.
-#define INSTRUCTION_HANDLER(o, code, n, f, i, a, v) &&alt_op_##code,
-#include "dex_instruction_list.h"
- DEX_INSTRUCTION_LIST(INSTRUCTION_HANDLER)
-#undef DEX_INSTRUCTION_LIST
-#undef INSTRUCTION_HANDLER
- }
- };
-
- constexpr bool do_assignability_check = do_access_check;
- if (UNLIKELY(!shadow_frame.HasReferenceArray())) {
- LOG(FATAL) << "Invalid shadow frame for interpreter use";
- return JValue();
- }
- self->VerifyStack();
-
- uint32_t dex_pc = shadow_frame.GetDexPC();
- const Instruction* inst = Instruction::At(code_item->insns_ + dex_pc);
- uint16_t inst_data;
- const void* const* currentHandlersTable;
- UPDATE_HANDLER_TABLE();
- const auto* const instrumentation = Runtime::Current()->GetInstrumentation();
- ArtMethod* method = shadow_frame.GetMethod();
- jit::Jit* jit = Runtime::Current()->GetJit();
-
- // Jump to first instruction.
- ADVANCE(0);
- UNREACHABLE_CODE_CHECK();
-
- HANDLE_INSTRUCTION_START(NOP)
- ADVANCE(1);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(MOVE)
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
- shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(MOVE_FROM16)
- shadow_frame.SetVReg(inst->VRegA_22x(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22x()));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(MOVE_16)
- shadow_frame.SetVReg(inst->VRegA_32x(),
- shadow_frame.GetVReg(inst->VRegB_32x()));
- ADVANCE(3);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(MOVE_WIDE)
- shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data),
- shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(MOVE_WIDE_FROM16)
- shadow_frame.SetVRegLong(inst->VRegA_22x(inst_data),
- shadow_frame.GetVRegLong(inst->VRegB_22x()));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(MOVE_WIDE_16)
- shadow_frame.SetVRegLong(inst->VRegA_32x(),
- shadow_frame.GetVRegLong(inst->VRegB_32x()));
- ADVANCE(3);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(MOVE_OBJECT)
- shadow_frame.SetVRegReference(inst->VRegA_12x(inst_data),
- shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(MOVE_OBJECT_FROM16)
- shadow_frame.SetVRegReference(inst->VRegA_22x(inst_data),
- shadow_frame.GetVRegReference(inst->VRegB_22x()));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(MOVE_OBJECT_16)
- shadow_frame.SetVRegReference(inst->VRegA_32x(),
- shadow_frame.GetVRegReference(inst->VRegB_32x()));
- ADVANCE(3);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(MOVE_RESULT)
- shadow_frame.SetVReg(inst->VRegA_11x(inst_data), result_register.GetI());
- ADVANCE(1);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(MOVE_RESULT_WIDE)
- shadow_frame.SetVRegLong(inst->VRegA_11x(inst_data), result_register.GetJ());
- ADVANCE(1);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(MOVE_RESULT_OBJECT)
- shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), result_register.GetL());
- ADVANCE(1);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(MOVE_EXCEPTION) {
- Throwable* exception = self->GetException();
- DCHECK(exception != nullptr) << "No pending exception on MOVE_EXCEPTION instruction";
- shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), exception);
- self->ClearException();
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(RETURN_VOID_NO_BARRIER) {
- JValue result;
- self->AllowThreadSuspension();
- HANDLE_MONITOR_CHECKS();
- if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
- instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
- shadow_frame.GetMethod(), dex_pc,
- result);
- }
- return result;
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(RETURN_VOID) {
- QuasiAtomic::ThreadFenceForConstructor();
- JValue result;
- self->AllowThreadSuspension();
- HANDLE_MONITOR_CHECKS();
- if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
- instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
- shadow_frame.GetMethod(), dex_pc,
- result);
- }
- return result;
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(RETURN) {
- JValue result;
- result.SetJ(0);
- result.SetI(shadow_frame.GetVReg(inst->VRegA_11x(inst_data)));
- self->AllowThreadSuspension();
- HANDLE_MONITOR_CHECKS();
- if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
- instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
- shadow_frame.GetMethod(), dex_pc,
- result);
- }
- return result;
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(RETURN_WIDE) {
- JValue result;
- result.SetJ(shadow_frame.GetVRegLong(inst->VRegA_11x(inst_data)));
- self->AllowThreadSuspension();
- HANDLE_MONITOR_CHECKS();
- if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
- instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
- shadow_frame.GetMethod(), dex_pc,
- result);
- }
- return result;
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(RETURN_OBJECT) {
- JValue result;
- self->AllowThreadSuspension();
- HANDLE_MONITOR_CHECKS();
- const uint8_t vreg_index = inst->VRegA_11x(inst_data);
- Object* obj_result = shadow_frame.GetVRegReference(vreg_index);
- if (do_assignability_check && obj_result != nullptr) {
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
- Class* return_type = shadow_frame.GetMethod()->GetReturnType(true /* resolve */,
- pointer_size);
- obj_result = shadow_frame.GetVRegReference(vreg_index);
- if (return_type == nullptr) {
- // Return the pending exception.
- HANDLE_PENDING_EXCEPTION();
- }
- if (!obj_result->VerifierInstanceOf(return_type)) {
- // This should never happen.
- std::string temp1, temp2;
- self->ThrowNewExceptionF("Ljava/lang/VirtualMachineError;",
- "Returning '%s' that is not instance of return type '%s'",
- obj_result->GetClass()->GetDescriptor(&temp1),
- return_type->GetDescriptor(&temp2));
- HANDLE_PENDING_EXCEPTION();
- }
- }
- result.SetL(obj_result);
- if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
- instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
- shadow_frame.GetMethod(), dex_pc,
- result);
- }
- return result;
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(CONST_4) {
- uint32_t dst = inst->VRegA_11n(inst_data);
- int32_t val = inst->VRegB_11n(inst_data);
- shadow_frame.SetVReg(dst, val);
- if (val == 0) {
- shadow_frame.SetVRegReference(dst, nullptr);
- }
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(CONST_16) {
- uint32_t dst = inst->VRegA_21s(inst_data);
- int32_t val = inst->VRegB_21s();
- shadow_frame.SetVReg(dst, val);
- if (val == 0) {
- shadow_frame.SetVRegReference(dst, nullptr);
- }
- ADVANCE(2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(CONST) {
- uint32_t dst = inst->VRegA_31i(inst_data);
- int32_t val = inst->VRegB_31i();
- shadow_frame.SetVReg(dst, val);
- if (val == 0) {
- shadow_frame.SetVRegReference(dst, nullptr);
- }
- ADVANCE(3);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(CONST_HIGH16) {
- uint32_t dst = inst->VRegA_21h(inst_data);
- int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
- shadow_frame.SetVReg(dst, val);
- if (val == 0) {
- shadow_frame.SetVRegReference(dst, nullptr);
- }
- ADVANCE(2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(CONST_WIDE_16)
- shadow_frame.SetVRegLong(inst->VRegA_21s(inst_data), inst->VRegB_21s());
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(CONST_WIDE_32)
- shadow_frame.SetVRegLong(inst->VRegA_31i(inst_data), inst->VRegB_31i());
- ADVANCE(3);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(CONST_WIDE)
- shadow_frame.SetVRegLong(inst->VRegA_51l(inst_data), inst->VRegB_51l());
- ADVANCE(5);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(CONST_WIDE_HIGH16)
- shadow_frame.SetVRegLong(inst->VRegA_21h(inst_data),
- static_cast<uint64_t>(inst->VRegB_21h()) << 48);
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(CONST_STRING) {
- String* s = ResolveString(self, shadow_frame, inst->VRegB_21c());
- if (UNLIKELY(s == nullptr)) {
- HANDLE_PENDING_EXCEPTION();
- } else {
- shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), s);
- ADVANCE(2);
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(CONST_STRING_JUMBO) {
- String* s = ResolveString(self, shadow_frame, inst->VRegB_31c());
- if (UNLIKELY(s == nullptr)) {
- HANDLE_PENDING_EXCEPTION();
- } else {
- shadow_frame.SetVRegReference(inst->VRegA_31c(inst_data), s);
- ADVANCE(3);
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(CONST_CLASS) {
- Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
- self, false, do_access_check);
- if (UNLIKELY(c == nullptr)) {
- HANDLE_PENDING_EXCEPTION();
- } else {
- shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), c);
- ADVANCE(2);
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(MONITOR_ENTER) {
- Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
- if (UNLIKELY(obj == nullptr)) {
- ThrowNullPointerExceptionFromInterpreter();
- HANDLE_PENDING_EXCEPTION();
- } else {
- DoMonitorEnter<do_access_check>(self, &shadow_frame, obj);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), 1);
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(MONITOR_EXIT) {
- Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
- if (UNLIKELY(obj == nullptr)) {
- ThrowNullPointerExceptionFromInterpreter();
- HANDLE_PENDING_EXCEPTION();
- } else {
- DoMonitorExit<do_access_check>(self, &shadow_frame, obj);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), 1);
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(CHECK_CAST) {
- Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
- self, false, do_access_check);
- if (UNLIKELY(c == nullptr)) {
- HANDLE_PENDING_EXCEPTION();
- } else {
- Object* obj = shadow_frame.GetVRegReference(inst->VRegA_21c(inst_data));
- if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
- ThrowClassCastException(c, obj->GetClass());
- HANDLE_PENDING_EXCEPTION();
- } else {
- ADVANCE(2);
- }
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(INSTANCE_OF) {
- Class* c = ResolveVerifyAndClinit(inst->VRegC_22c(), shadow_frame.GetMethod(),
- self, false, do_access_check);
- if (UNLIKELY(c == nullptr)) {
- HANDLE_PENDING_EXCEPTION();
- } else {
- Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
- shadow_frame.SetVReg(inst->VRegA_22c(inst_data), (obj != nullptr && obj->InstanceOf(c)) ? 1 : 0);
- ADVANCE(2);
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(ARRAY_LENGTH) {
- Object* array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
- if (UNLIKELY(array == nullptr)) {
- ThrowNullPointerExceptionFromInterpreter();
- HANDLE_PENDING_EXCEPTION();
- } else {
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data), array->AsArray()->GetLength());
- ADVANCE(1);
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(NEW_INSTANCE) {
- Object* obj = nullptr;
- Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
- self, false, do_access_check);
- if (LIKELY(c != nullptr)) {
- if (UNLIKELY(c->IsStringClass())) {
- gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
- obj = mirror::String::AllocEmptyString<true>(self, allocator_type);
- } else {
- obj = AllocObjectFromCode<do_access_check, true>(
- inst->VRegB_21c(), shadow_frame.GetMethod(), self,
- Runtime::Current()->GetHeap()->GetCurrentAllocator());
- }
- }
- if (UNLIKELY(obj == nullptr)) {
- HANDLE_PENDING_EXCEPTION();
- } else {
- obj->GetClass()->AssertInitializedOrInitializingInThread(self);
- // Don't allow finalizable objects to be allocated during a transaction since these can't be
- // finalized without a started runtime.
- if (transaction_active && obj->GetClass()->IsFinalizable()) {
- AbortTransactionF(self, "Allocating finalizable object in transaction: %s",
- PrettyTypeOf(obj).c_str());
- HANDLE_PENDING_EXCEPTION();
- }
- shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), obj);
- ADVANCE(2);
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(NEW_ARRAY) {
- int32_t length = shadow_frame.GetVReg(inst->VRegB_22c(inst_data));
- Object* obj = AllocArrayFromCode<do_access_check, true>(
- inst->VRegC_22c(), length, shadow_frame.GetMethod(), self,
- Runtime::Current()->GetHeap()->GetCurrentAllocator());
- if (UNLIKELY(obj == nullptr)) {
- HANDLE_PENDING_EXCEPTION();
- } else {
- shadow_frame.SetVRegReference(inst->VRegA_22c(inst_data), obj);
- ADVANCE(2);
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(FILLED_NEW_ARRAY) {
- bool success =
- DoFilledNewArray<false, do_access_check, transaction_active>(inst, shadow_frame,
- self, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(FILLED_NEW_ARRAY_RANGE) {
- bool success =
- DoFilledNewArray<true, do_access_check, transaction_active>(inst, shadow_frame,
- self, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(FILL_ARRAY_DATA) {
- Object* obj = shadow_frame.GetVRegReference(inst->VRegA_31t(inst_data));
- const uint16_t* payload_addr = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
- const Instruction::ArrayDataPayload* payload =
- reinterpret_cast<const Instruction::ArrayDataPayload*>(payload_addr);
- bool success = FillArrayData(obj, payload);
- if (transaction_active && success) {
- RecordArrayElementsInTransaction(obj->AsArray(), payload->element_count);
- }
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(THROW) {
- Object* exception = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
- if (UNLIKELY(exception == nullptr)) {
- ThrowNullPointerException("throw with null exception");
- } else if (do_assignability_check && !exception->GetClass()->IsThrowableClass()) {
- // This should never happen.
- std::string temp;
- self->ThrowNewExceptionF("Ljava/lang/VirtualMachineError;",
- "Throwing '%s' that is not instance of Throwable",
- exception->GetClass()->GetDescriptor(&temp));
- } else {
- self->SetException(exception->AsThrowable());
- }
- HANDLE_PENDING_EXCEPTION();
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(GOTO) {
- int8_t offset = inst->VRegA_10t(inst_data);
- BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- if (UNLIKELY(self->TestAllFlags())) {
- self->CheckSuspend();
- UPDATE_HANDLER_TABLE();
- }
- }
- ADVANCE(offset);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(GOTO_16) {
- int16_t offset = inst->VRegA_20t();
- BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- if (UNLIKELY(self->TestAllFlags())) {
- self->CheckSuspend();
- UPDATE_HANDLER_TABLE();
- }
- }
- ADVANCE(offset);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(GOTO_32) {
- int32_t offset = inst->VRegA_30t();
- BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- if (UNLIKELY(self->TestAllFlags())) {
- self->CheckSuspend();
- UPDATE_HANDLER_TABLE();
- }
- }
- ADVANCE(offset);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(PACKED_SWITCH) {
- int32_t offset = DoPackedSwitch(inst, shadow_frame, inst_data);
- BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- if (UNLIKELY(self->TestAllFlags())) {
- self->CheckSuspend();
- UPDATE_HANDLER_TABLE();
- }
- }
- ADVANCE(offset);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SPARSE_SWITCH) {
- int32_t offset = DoSparseSwitch(inst, shadow_frame, inst_data);
- BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- if (UNLIKELY(self->TestAllFlags())) {
- self->CheckSuspend();
- UPDATE_HANDLER_TABLE();
- }
- }
- ADVANCE(offset);
- }
- HANDLE_INSTRUCTION_END();
-
-#if defined(__clang__)
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wfloat-equal"
-#endif
-
- HANDLE_INSTRUCTION_START(CMPL_FLOAT) {
- float val1 = shadow_frame.GetVRegFloat(inst->VRegB_23x());
- float val2 = shadow_frame.GetVRegFloat(inst->VRegC_23x());
- int32_t result;
- if (val1 > val2) {
- result = 1;
- } else if (val1 == val2) {
- result = 0;
- } else {
- result = -1;
- }
- shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
- ADVANCE(2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(CMPG_FLOAT) {
- float val1 = shadow_frame.GetVRegFloat(inst->VRegB_23x());
- float val2 = shadow_frame.GetVRegFloat(inst->VRegC_23x());
- int32_t result;
- if (val1 < val2) {
- result = -1;
- } else if (val1 == val2) {
- result = 0;
- } else {
- result = 1;
- }
- shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
- ADVANCE(2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(CMPL_DOUBLE) {
- double val1 = shadow_frame.GetVRegDouble(inst->VRegB_23x());
- double val2 = shadow_frame.GetVRegDouble(inst->VRegC_23x());
- int32_t result;
- if (val1 > val2) {
- result = 1;
- } else if (val1 == val2) {
- result = 0;
- } else {
- result = -1;
- }
- shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
- ADVANCE(2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(CMPG_DOUBLE) {
- double val1 = shadow_frame.GetVRegDouble(inst->VRegB_23x());
- double val2 = shadow_frame.GetVRegDouble(inst->VRegC_23x());
- int32_t result;
- if (val1 < val2) {
- result = -1;
- } else if (val1 == val2) {
- result = 0;
- } else {
- result = 1;
- }
- shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
- ADVANCE(2);
- }
- HANDLE_INSTRUCTION_END();
-
-#if defined(__clang__)
-#pragma clang diagnostic pop
-#endif
-
- HANDLE_INSTRUCTION_START(CMP_LONG) {
- int64_t val1 = shadow_frame.GetVRegLong(inst->VRegB_23x());
- int64_t val2 = shadow_frame.GetVRegLong(inst->VRegC_23x());
- int32_t result;
- if (val1 > val2) {
- result = 1;
- } else if (val1 == val2) {
- result = 0;
- } else {
- result = -1;
- }
- shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
- ADVANCE(2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IF_EQ) {
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) == shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
- int16_t offset = inst->VRegC_22t();
- BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- if (UNLIKELY(self->TestAllFlags())) {
- self->CheckSuspend();
- UPDATE_HANDLER_TABLE();
- }
- }
- ADVANCE(offset);
- } else {
- BRANCH_INSTRUMENTATION(2);
- ADVANCE(2);
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IF_NE) {
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) !=
- shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
- int16_t offset = inst->VRegC_22t();
- BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- if (UNLIKELY(self->TestAllFlags())) {
- self->CheckSuspend();
- UPDATE_HANDLER_TABLE();
- }
- }
- ADVANCE(offset);
- } else {
- BRANCH_INSTRUMENTATION(2);
- ADVANCE(2);
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IF_LT) {
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <
- shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
- int16_t offset = inst->VRegC_22t();
- BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- if (UNLIKELY(self->TestAllFlags())) {
- self->CheckSuspend();
- UPDATE_HANDLER_TABLE();
- }
- }
- ADVANCE(offset);
- } else {
- BRANCH_INSTRUMENTATION(2);
- ADVANCE(2);
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IF_GE) {
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >=
- shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
- int16_t offset = inst->VRegC_22t();
- BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- if (UNLIKELY(self->TestAllFlags())) {
- self->CheckSuspend();
- UPDATE_HANDLER_TABLE();
- }
- }
- ADVANCE(offset);
- } else {
- BRANCH_INSTRUMENTATION(2);
- ADVANCE(2);
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IF_GT) {
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >
- shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
- int16_t offset = inst->VRegC_22t();
- BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- if (UNLIKELY(self->TestAllFlags())) {
- self->CheckSuspend();
- UPDATE_HANDLER_TABLE();
- }
- }
- ADVANCE(offset);
- } else {
- BRANCH_INSTRUMENTATION(2);
- ADVANCE(2);
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IF_LE) {
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <=
- shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
- int16_t offset = inst->VRegC_22t();
- BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- if (UNLIKELY(self->TestAllFlags())) {
- self->CheckSuspend();
- UPDATE_HANDLER_TABLE();
- }
- }
- ADVANCE(offset);
- } else {
- BRANCH_INSTRUMENTATION(2);
- ADVANCE(2);
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IF_EQZ) {
- if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) == 0) {
- int16_t offset = inst->VRegB_21t();
- BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- if (UNLIKELY(self->TestAllFlags())) {
- self->CheckSuspend();
- UPDATE_HANDLER_TABLE();
- }
- }
- ADVANCE(offset);
- } else {
- BRANCH_INSTRUMENTATION(2);
- ADVANCE(2);
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IF_NEZ) {
- if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) != 0) {
- int16_t offset = inst->VRegB_21t();
- BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- if (UNLIKELY(self->TestAllFlags())) {
- self->CheckSuspend();
- UPDATE_HANDLER_TABLE();
- }
- }
- ADVANCE(offset);
- } else {
- BRANCH_INSTRUMENTATION(2);
- ADVANCE(2);
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IF_LTZ) {
- if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) < 0) {
- int16_t offset = inst->VRegB_21t();
- BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- if (UNLIKELY(self->TestAllFlags())) {
- self->CheckSuspend();
- UPDATE_HANDLER_TABLE();
- }
- }
- ADVANCE(offset);
- } else {
- BRANCH_INSTRUMENTATION(2);
- ADVANCE(2);
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IF_GEZ) {
- if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) >= 0) {
- int16_t offset = inst->VRegB_21t();
- BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- if (UNLIKELY(self->TestAllFlags())) {
- self->CheckSuspend();
- UPDATE_HANDLER_TABLE();
- }
- }
- ADVANCE(offset);
- } else {
- BRANCH_INSTRUMENTATION(2);
- ADVANCE(2);
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IF_GTZ) {
- if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) > 0) {
- int16_t offset = inst->VRegB_21t();
- BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- if (UNLIKELY(self->TestAllFlags())) {
- self->CheckSuspend();
- UPDATE_HANDLER_TABLE();
- }
- }
- ADVANCE(offset);
- } else {
- BRANCH_INSTRUMENTATION(2);
- ADVANCE(2);
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IF_LEZ) {
- if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) <= 0) {
- int16_t offset = inst->VRegB_21t();
- BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- if (UNLIKELY(self->TestAllFlags())) {
- self->CheckSuspend();
- UPDATE_HANDLER_TABLE();
- }
- }
- ADVANCE(offset);
- } else {
- BRANCH_INSTRUMENTATION(2);
- ADVANCE(2);
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(AGET_BOOLEAN) {
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == nullptr)) {
- ThrowNullPointerExceptionFromInterpreter();
- HANDLE_PENDING_EXCEPTION();
- } else {
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- BooleanArray* array = a->AsBooleanArray();
- if (LIKELY(array->CheckIsValidIndex(index))) {
- shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
- ADVANCE(2);
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(AGET_BYTE) {
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == nullptr)) {
- ThrowNullPointerExceptionFromInterpreter();
- HANDLE_PENDING_EXCEPTION();
- } else {
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- ByteArray* array = a->AsByteArray();
- if (LIKELY(array->CheckIsValidIndex(index))) {
- shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
- ADVANCE(2);
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(AGET_CHAR) {
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == nullptr)) {
- ThrowNullPointerExceptionFromInterpreter();
- HANDLE_PENDING_EXCEPTION();
- } else {
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- CharArray* array = a->AsCharArray();
- if (LIKELY(array->CheckIsValidIndex(index))) {
- shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
- ADVANCE(2);
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(AGET_SHORT) {
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == nullptr)) {
- ThrowNullPointerExceptionFromInterpreter();
- HANDLE_PENDING_EXCEPTION();
- } else {
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- ShortArray* array = a->AsShortArray();
- if (LIKELY(array->CheckIsValidIndex(index))) {
- shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
- ADVANCE(2);
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(AGET) {
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == nullptr)) {
- ThrowNullPointerExceptionFromInterpreter();
- HANDLE_PENDING_EXCEPTION();
- } else {
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- DCHECK(a->IsIntArray() || a->IsFloatArray()) << PrettyTypeOf(a);
- auto* array = down_cast<IntArray*>(a);
- if (LIKELY(array->CheckIsValidIndex(index))) {
- shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
- ADVANCE(2);
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(AGET_WIDE) {
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == nullptr)) {
- ThrowNullPointerExceptionFromInterpreter();
- HANDLE_PENDING_EXCEPTION();
- } else {
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- DCHECK(a->IsLongArray() || a->IsDoubleArray()) << PrettyTypeOf(a);
- auto* array = down_cast<LongArray*>(a);
- if (LIKELY(array->CheckIsValidIndex(index))) {
- shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
- ADVANCE(2);
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(AGET_OBJECT) {
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == nullptr)) {
- ThrowNullPointerExceptionFromInterpreter();
- HANDLE_PENDING_EXCEPTION();
- } else {
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- ObjectArray<Object>* array = a->AsObjectArray<Object>();
- if (LIKELY(array->CheckIsValidIndex(index))) {
- shadow_frame.SetVRegReference(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
- ADVANCE(2);
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(APUT_BOOLEAN) {
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == nullptr)) {
- ThrowNullPointerExceptionFromInterpreter();
- HANDLE_PENDING_EXCEPTION();
- } else {
- uint8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- BooleanArray* array = a->AsBooleanArray();
- if (LIKELY(array->CheckIsValidIndex(index))) {
- array->SetWithoutChecks<transaction_active>(index, val);
- ADVANCE(2);
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(APUT_BYTE) {
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == nullptr)) {
- ThrowNullPointerExceptionFromInterpreter();
- HANDLE_PENDING_EXCEPTION();
- } else {
- int8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- ByteArray* array = a->AsByteArray();
- if (LIKELY(array->CheckIsValidIndex(index))) {
- array->SetWithoutChecks<transaction_active>(index, val);
- ADVANCE(2);
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(APUT_CHAR) {
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == nullptr)) {
- ThrowNullPointerExceptionFromInterpreter();
- HANDLE_PENDING_EXCEPTION();
- } else {
- uint16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- CharArray* array = a->AsCharArray();
- if (LIKELY(array->CheckIsValidIndex(index))) {
- array->SetWithoutChecks<transaction_active>(index, val);
- ADVANCE(2);
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(APUT_SHORT) {
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == nullptr)) {
- ThrowNullPointerExceptionFromInterpreter();
- HANDLE_PENDING_EXCEPTION();
- } else {
- int16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- ShortArray* array = a->AsShortArray();
- if (LIKELY(array->CheckIsValidIndex(index))) {
- array->SetWithoutChecks<transaction_active>(index, val);
- ADVANCE(2);
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(APUT) {
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == nullptr)) {
- ThrowNullPointerExceptionFromInterpreter();
- HANDLE_PENDING_EXCEPTION();
- } else {
- int32_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- DCHECK(a->IsIntArray() || a->IsFloatArray()) << PrettyTypeOf(a);
- auto* array = down_cast<IntArray*>(a);
- if (LIKELY(array->CheckIsValidIndex(index))) {
- array->SetWithoutChecks<transaction_active>(index, val);
- ADVANCE(2);
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(APUT_WIDE) {
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == nullptr)) {
- ThrowNullPointerExceptionFromInterpreter();
- HANDLE_PENDING_EXCEPTION();
- } else {
- int64_t val = shadow_frame.GetVRegLong(inst->VRegA_23x(inst_data));
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- DCHECK(a->IsLongArray() || a->IsDoubleArray()) << PrettyTypeOf(a);
- auto* array = down_cast<LongArray*>(a);
- if (LIKELY(array->CheckIsValidIndex(index))) {
- array->SetWithoutChecks<transaction_active>(index, val);
- ADVANCE(2);
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(APUT_OBJECT) {
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == nullptr)) {
- ThrowNullPointerExceptionFromInterpreter();
- HANDLE_PENDING_EXCEPTION();
- } else {
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- Object* val = shadow_frame.GetVRegReference(inst->VRegA_23x(inst_data));
- ObjectArray<Object>* array = a->AsObjectArray<Object>();
- if (LIKELY(array->CheckIsValidIndex(index) && array->CheckAssignable(val))) {
- array->SetWithoutChecks<transaction_active>(index, val);
- ADVANCE(2);
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- }
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IGET_BOOLEAN) {
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(
- self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IGET_BYTE) {
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(
- self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IGET_CHAR) {
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(
- self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IGET_SHORT) {
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(
- self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IGET) {
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(
- self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IGET_WIDE) {
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(
- self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IGET_OBJECT) {
- bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(
- self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IGET_QUICK) {
- bool success = DoIGetQuick<Primitive::kPrimInt>(shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IGET_BOOLEAN_QUICK) {
- bool success = DoIGetQuick<Primitive::kPrimBoolean>(shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IGET_BYTE_QUICK) {
- bool success = DoIGetQuick<Primitive::kPrimByte>(shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IGET_CHAR_QUICK) {
- bool success = DoIGetQuick<Primitive::kPrimChar>(shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IGET_SHORT_QUICK) {
- bool success = DoIGetQuick<Primitive::kPrimShort>(shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IGET_WIDE_QUICK) {
- bool success = DoIGetQuick<Primitive::kPrimLong>(shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IGET_OBJECT_QUICK) {
- bool success = DoIGetQuick<Primitive::kPrimNot>(shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SGET_BOOLEAN) {
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(
- self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SGET_BYTE) {
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check>(
- self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SGET_CHAR) {
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check>(
- self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SGET_SHORT) {
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check>(
- self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SGET) {
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check>(
- self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SGET_WIDE) {
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check>(
- self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SGET_OBJECT) {
- bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check>(
- self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IPUT_BOOLEAN) {
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check,
- transaction_active>(self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IPUT_BYTE) {
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check,
- transaction_active>(self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IPUT_CHAR) {
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check,
- transaction_active>(self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IPUT_SHORT) {
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check,
- transaction_active>(self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IPUT) {
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check,
- transaction_active>(self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IPUT_WIDE) {
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check,
- transaction_active>(self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IPUT_OBJECT) {
- bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check,
- transaction_active>(self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IPUT_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimInt, transaction_active>(
- shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IPUT_BOOLEAN_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimBoolean, transaction_active>(
- shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IPUT_BYTE_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimByte, transaction_active>(
- shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IPUT_CHAR_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimChar, transaction_active>(
- shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IPUT_SHORT_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimShort, transaction_active>(
- shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IPUT_WIDE_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimLong, transaction_active>(
- shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(IPUT_OBJECT_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimNot, transaction_active>(
- shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SPUT_BOOLEAN) {
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check,
- transaction_active>(self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SPUT_BYTE) {
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check,
- transaction_active>(self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SPUT_CHAR) {
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check,
- transaction_active>(self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SPUT_SHORT) {
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check,
- transaction_active>(self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SPUT) {
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check,
- transaction_active>(self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SPUT_WIDE) {
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check,
- transaction_active>(self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SPUT_OBJECT) {
- bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check,
- transaction_active>(self, shadow_frame, inst, inst_data);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL) {
- bool success = DoInvoke<kVirtual, false, do_access_check>(
- self, shadow_frame, inst, inst_data, &result_register);
- UPDATE_HANDLER_TABLE();
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL_RANGE) {
- bool success = DoInvoke<kVirtual, true, do_access_check>(
- self, shadow_frame, inst, inst_data, &result_register);
- UPDATE_HANDLER_TABLE();
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(INVOKE_SUPER) {
- bool success = DoInvoke<kSuper, false, do_access_check>(
- self, shadow_frame, inst, inst_data, &result_register);
- UPDATE_HANDLER_TABLE();
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(INVOKE_SUPER_RANGE) {
- bool success = DoInvoke<kSuper, true, do_access_check>(
- self, shadow_frame, inst, inst_data, &result_register);
- UPDATE_HANDLER_TABLE();
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(INVOKE_DIRECT) {
- bool success = DoInvoke<kDirect, false, do_access_check>(
- self, shadow_frame, inst, inst_data, &result_register);
- UPDATE_HANDLER_TABLE();
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(INVOKE_DIRECT_RANGE) {
- bool success = DoInvoke<kDirect, true, do_access_check>(
- self, shadow_frame, inst, inst_data, &result_register);
- UPDATE_HANDLER_TABLE();
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(INVOKE_INTERFACE) {
- bool success = DoInvoke<kInterface, false, do_access_check>(
- self, shadow_frame, inst, inst_data, &result_register);
- UPDATE_HANDLER_TABLE();
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(INVOKE_INTERFACE_RANGE) {
- bool success = DoInvoke<kInterface, true, do_access_check>(
- self, shadow_frame, inst, inst_data, &result_register);
- UPDATE_HANDLER_TABLE();
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(INVOKE_STATIC) {
- bool success = DoInvoke<kStatic, false, do_access_check>(
- self, shadow_frame, inst, inst_data, &result_register);
- UPDATE_HANDLER_TABLE();
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(INVOKE_STATIC_RANGE) {
- bool success = DoInvoke<kStatic, true, do_access_check>(
- self, shadow_frame, inst, inst_data, &result_register);
- UPDATE_HANDLER_TABLE();
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL_QUICK) {
- bool success = DoInvokeVirtualQuick<false>(
- self, shadow_frame, inst, inst_data, &result_register);
- UPDATE_HANDLER_TABLE();
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL_RANGE_QUICK) {
- bool success = DoInvokeVirtualQuick<true>(
- self, shadow_frame, inst, inst_data, &result_register);
- UPDATE_HANDLER_TABLE();
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(NEG_INT)
- shadow_frame.SetVReg(
- inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(NOT_INT)
- shadow_frame.SetVReg(
- inst->VRegA_12x(inst_data), ~shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(NEG_LONG)
- shadow_frame.SetVRegLong(
- inst->VRegA_12x(inst_data), -shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(NOT_LONG)
- shadow_frame.SetVRegLong(
- inst->VRegA_12x(inst_data), ~shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(NEG_FLOAT)
- shadow_frame.SetVRegFloat(
- inst->VRegA_12x(inst_data), -shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(NEG_DOUBLE)
- shadow_frame.SetVRegDouble(
- inst->VRegA_12x(inst_data), -shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(INT_TO_LONG)
- shadow_frame.SetVRegLong(
- inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(INT_TO_FLOAT)
- shadow_frame.SetVRegFloat(
- inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(INT_TO_DOUBLE)
- shadow_frame.SetVRegDouble(
- inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(LONG_TO_INT)
- shadow_frame.SetVReg(
- inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(LONG_TO_FLOAT)
- shadow_frame.SetVRegFloat(
- inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(LONG_TO_DOUBLE)
- shadow_frame.SetVRegDouble(
- inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(FLOAT_TO_INT) {
- float val = shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data));
- int32_t result = art_float_to_integral<int32_t, float>(val);
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data), result);
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(FLOAT_TO_LONG) {
- float val = shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data));
- int64_t result = art_float_to_integral<int64_t, float>(val);
- shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), result);
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(FLOAT_TO_DOUBLE)
- shadow_frame.SetVRegDouble(
- inst->VRegA_12x(inst_data), shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(DOUBLE_TO_INT) {
- double val = shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data));
- int32_t result = art_float_to_integral<int32_t, double>(val);
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data), result);
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(DOUBLE_TO_LONG) {
- double val = shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data));
- int64_t result = art_float_to_integral<int64_t, double>(val);
- shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), result);
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(DOUBLE_TO_FLOAT)
- shadow_frame.SetVRegFloat(
- inst->VRegA_12x(inst_data), shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(INT_TO_BYTE)
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
- static_cast<int8_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
- ADVANCE(1);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(INT_TO_CHAR)
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
- static_cast<uint16_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
- ADVANCE(1);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(INT_TO_SHORT)
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
- static_cast<int16_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
- ADVANCE(1);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(ADD_INT)
- shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
- SafeAdd(shadow_frame.GetVReg(inst->VRegB_23x()),
- shadow_frame.GetVReg(inst->VRegC_23x())));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SUB_INT)
- shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
- SafeSub(shadow_frame.GetVReg(inst->VRegB_23x()),
- shadow_frame.GetVReg(inst->VRegC_23x())));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(MUL_INT)
- shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
- SafeMul(shadow_frame.GetVReg(inst->VRegB_23x()),
- shadow_frame.GetVReg(inst->VRegC_23x())));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(DIV_INT) {
- bool success = DoIntDivide(shadow_frame, inst->VRegA_23x(inst_data),
- shadow_frame.GetVReg(inst->VRegB_23x()),
- shadow_frame.GetVReg(inst->VRegC_23x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(REM_INT) {
- bool success = DoIntRemainder(shadow_frame, inst->VRegA_23x(inst_data),
- shadow_frame.GetVReg(inst->VRegB_23x()),
- shadow_frame.GetVReg(inst->VRegC_23x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SHL_INT)
- shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
- shadow_frame.GetVReg(inst->VRegB_23x()) <<
- (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SHR_INT)
- shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
- shadow_frame.GetVReg(inst->VRegB_23x()) >>
- (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(USHR_INT)
- shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
- static_cast<uint32_t>(shadow_frame.GetVReg(inst->VRegB_23x())) >>
- (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(AND_INT)
- shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
- shadow_frame.GetVReg(inst->VRegB_23x()) &
- shadow_frame.GetVReg(inst->VRegC_23x()));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(OR_INT)
- shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
- shadow_frame.GetVReg(inst->VRegB_23x()) |
- shadow_frame.GetVReg(inst->VRegC_23x()));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(XOR_INT)
- shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
- shadow_frame.GetVReg(inst->VRegB_23x()) ^
- shadow_frame.GetVReg(inst->VRegC_23x()));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(ADD_LONG)
- shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
- SafeAdd(shadow_frame.GetVRegLong(inst->VRegB_23x()),
- shadow_frame.GetVRegLong(inst->VRegC_23x())));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SUB_LONG)
- shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
- SafeSub(shadow_frame.GetVRegLong(inst->VRegB_23x()),
- shadow_frame.GetVRegLong(inst->VRegC_23x())));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(MUL_LONG)
- shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
- SafeMul(shadow_frame.GetVRegLong(inst->VRegB_23x()),
- shadow_frame.GetVRegLong(inst->VRegC_23x())));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(DIV_LONG) {
- bool success = DoLongDivide(shadow_frame, inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegLong(inst->VRegB_23x()),
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(REM_LONG) {
- bool success = DoLongRemainder(shadow_frame, inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegLong(inst->VRegB_23x()),
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(AND_LONG)
- shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) &
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(OR_LONG)
- shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) |
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(XOR_LONG)
- shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) ^
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SHL_LONG)
- shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) <<
- (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SHR_LONG)
- shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) >>
- (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(USHR_LONG)
- shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
- static_cast<uint64_t>(shadow_frame.GetVRegLong(inst->VRegB_23x())) >>
- (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(ADD_FLOAT)
- shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegFloat(inst->VRegB_23x()) +
- shadow_frame.GetVRegFloat(inst->VRegC_23x()));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SUB_FLOAT)
- shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegFloat(inst->VRegB_23x()) -
- shadow_frame.GetVRegFloat(inst->VRegC_23x()));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(MUL_FLOAT)
- shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegFloat(inst->VRegB_23x()) *
- shadow_frame.GetVRegFloat(inst->VRegC_23x()));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(DIV_FLOAT)
- shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegFloat(inst->VRegB_23x()) /
- shadow_frame.GetVRegFloat(inst->VRegC_23x()));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(REM_FLOAT)
- shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
- fmodf(shadow_frame.GetVRegFloat(inst->VRegB_23x()),
- shadow_frame.GetVRegFloat(inst->VRegC_23x())));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(ADD_DOUBLE)
- shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegDouble(inst->VRegB_23x()) +
- shadow_frame.GetVRegDouble(inst->VRegC_23x()));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SUB_DOUBLE)
- shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegDouble(inst->VRegB_23x()) -
- shadow_frame.GetVRegDouble(inst->VRegC_23x()));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(MUL_DOUBLE)
- shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegDouble(inst->VRegB_23x()) *
- shadow_frame.GetVRegDouble(inst->VRegC_23x()));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(DIV_DOUBLE)
- shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegDouble(inst->VRegB_23x()) /
- shadow_frame.GetVRegDouble(inst->VRegC_23x()));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(REM_DOUBLE)
- shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
- fmod(shadow_frame.GetVRegDouble(inst->VRegB_23x()),
- shadow_frame.GetVRegDouble(inst->VRegC_23x())));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(ADD_INT_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVReg(vregA,
- SafeAdd(shadow_frame.GetVReg(vregA),
- shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SUB_INT_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVReg(vregA,
- SafeSub(shadow_frame.GetVReg(vregA),
- shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(MUL_INT_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVReg(vregA,
- SafeMul(shadow_frame.GetVReg(vregA),
- shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(DIV_INT_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- bool success = DoIntDivide(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
- shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(REM_INT_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- bool success = DoIntRemainder(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
- shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SHL_INT_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) <<
- (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x1f));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SHR_INT_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) >>
- (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x1f));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(USHR_INT_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVReg(vregA,
- static_cast<uint32_t>(shadow_frame.GetVReg(vregA)) >>
- (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x1f));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(AND_INT_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) &
- shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(OR_INT_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) |
- shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(XOR_INT_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) ^
- shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(ADD_LONG_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVRegLong(vregA,
- SafeAdd(shadow_frame.GetVRegLong(vregA),
- shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data))));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SUB_LONG_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVRegLong(vregA,
- SafeSub(shadow_frame.GetVRegLong(vregA),
- shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data))));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(MUL_LONG_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVRegLong(vregA,
- SafeMul(shadow_frame.GetVRegLong(vregA),
- shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data))));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(DIV_LONG_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- bool success = DoLongDivide(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA),
- shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(REM_LONG_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- bool success = DoLongRemainder(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA),
- shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(AND_LONG_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) &
- shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(OR_LONG_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) |
- shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(XOR_LONG_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) ^
- shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SHL_LONG_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) <<
- (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x3f));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SHR_LONG_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) >>
- (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x3f));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(USHR_LONG_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVRegLong(vregA,
- static_cast<uint64_t>(shadow_frame.GetVRegLong(vregA)) >>
- (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x3f));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(ADD_FLOAT_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVRegFloat(vregA,
- shadow_frame.GetVRegFloat(vregA) +
- shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SUB_FLOAT_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVRegFloat(vregA,
- shadow_frame.GetVRegFloat(vregA) -
- shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(MUL_FLOAT_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVRegFloat(vregA,
- shadow_frame.GetVRegFloat(vregA) *
- shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(DIV_FLOAT_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVRegFloat(vregA,
- shadow_frame.GetVRegFloat(vregA) /
- shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(REM_FLOAT_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVRegFloat(vregA,
- fmodf(shadow_frame.GetVRegFloat(vregA),
- shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data))));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(ADD_DOUBLE_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVRegDouble(vregA,
- shadow_frame.GetVRegDouble(vregA) +
- shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SUB_DOUBLE_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVRegDouble(vregA,
- shadow_frame.GetVRegDouble(vregA) -
- shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(MUL_DOUBLE_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVRegDouble(vregA,
- shadow_frame.GetVRegDouble(vregA) *
- shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(DIV_DOUBLE_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVRegDouble(vregA,
- shadow_frame.GetVRegDouble(vregA) /
- shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(REM_DOUBLE_2ADDR) {
- uint32_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVRegDouble(vregA,
- fmod(shadow_frame.GetVRegDouble(vregA),
- shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data))));
- ADVANCE(1);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(ADD_INT_LIT16)
- shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
- SafeAdd(shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
- inst->VRegC_22s()));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(RSUB_INT)
- shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
- SafeSub(inst->VRegC_22s(),
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data))));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(MUL_INT_LIT16)
- shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
- SafeMul(shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
- inst->VRegC_22s()));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(DIV_INT_LIT16) {
- bool success = DoIntDivide(
- shadow_frame, inst->VRegA_22s(inst_data), shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
- inst->VRegC_22s());
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(REM_INT_LIT16) {
- bool success = DoIntRemainder(
- shadow_frame, inst->VRegA_22s(inst_data), shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
- inst->VRegC_22s());
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(AND_INT_LIT16)
- shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) &
- inst->VRegC_22s());
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(OR_INT_LIT16)
- shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) |
- inst->VRegC_22s());
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(XOR_INT_LIT16)
- shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) ^
- inst->VRegC_22s());
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(ADD_INT_LIT8)
- shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
- SafeAdd(shadow_frame.GetVReg(inst->VRegB_22b()),
- inst->VRegC_22b()));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(RSUB_INT_LIT8)
- shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
- SafeSub(inst->VRegC_22b(),
- shadow_frame.GetVReg(inst->VRegB_22b())));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(MUL_INT_LIT8)
- shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
- SafeMul(shadow_frame.GetVReg(inst->VRegB_22b()),
- inst->VRegC_22b()));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(DIV_INT_LIT8) {
- bool success = DoIntDivide(shadow_frame, inst->VRegA_22b(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b());
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(REM_INT_LIT8) {
- bool success = DoIntRemainder(shadow_frame, inst->VRegA_22b(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b());
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
- }
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(AND_INT_LIT8)
- shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22b()) &
- inst->VRegC_22b());
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(OR_INT_LIT8)
- shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22b()) |
- inst->VRegC_22b());
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(XOR_INT_LIT8)
- shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22b()) ^
- inst->VRegC_22b());
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SHL_INT_LIT8)
- shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22b()) <<
- (inst->VRegC_22b() & 0x1f));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(SHR_INT_LIT8)
- shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22b()) >>
- (inst->VRegC_22b() & 0x1f));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(USHR_INT_LIT8)
- shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
- static_cast<uint32_t>(shadow_frame.GetVReg(inst->VRegB_22b())) >>
- (inst->VRegC_22b() & 0x1f));
- ADVANCE(2);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(UNUSED_3E)
- UnexpectedOpcode(inst, shadow_frame);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(UNUSED_3F)
- UnexpectedOpcode(inst, shadow_frame);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(UNUSED_40)
- UnexpectedOpcode(inst, shadow_frame);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(UNUSED_41)
- UnexpectedOpcode(inst, shadow_frame);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(UNUSED_42)
- UnexpectedOpcode(inst, shadow_frame);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(UNUSED_43)
- UnexpectedOpcode(inst, shadow_frame);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(UNUSED_79)
- UnexpectedOpcode(inst, shadow_frame);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(UNUSED_7A)
- UnexpectedOpcode(inst, shadow_frame);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(UNUSED_F3)
- UnexpectedOpcode(inst, shadow_frame);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(UNUSED_F4)
- UnexpectedOpcode(inst, shadow_frame);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(UNUSED_F5)
- UnexpectedOpcode(inst, shadow_frame);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(UNUSED_F6)
- UnexpectedOpcode(inst, shadow_frame);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(UNUSED_F7)
- UnexpectedOpcode(inst, shadow_frame);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(UNUSED_F8)
- UnexpectedOpcode(inst, shadow_frame);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(UNUSED_F9)
- UnexpectedOpcode(inst, shadow_frame);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(UNUSED_FA)
- UnexpectedOpcode(inst, shadow_frame);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(UNUSED_FB)
- UnexpectedOpcode(inst, shadow_frame);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(UNUSED_FC)
- UnexpectedOpcode(inst, shadow_frame);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(UNUSED_FD)
- UnexpectedOpcode(inst, shadow_frame);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(UNUSED_FE)
- UnexpectedOpcode(inst, shadow_frame);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(UNUSED_FF)
- UnexpectedOpcode(inst, shadow_frame);
- HANDLE_INSTRUCTION_END();
-
- exception_pending_label: {
- CHECK(self->IsExceptionPending());
- if (UNLIKELY(self->TestAllFlags())) {
- self->CheckSuspend();
- UPDATE_HANDLER_TABLE();
- }
- uint32_t found_dex_pc = FindNextInstructionFollowingException(self, shadow_frame, dex_pc,
- instrumentation);
- if (found_dex_pc == DexFile::kDexNoIndex) {
- // Structured locking is to be enforced for abnormal termination, too.
- DoMonitorCheckOnExit<do_assignability_check>(self, &shadow_frame);
- return JValue(); /* Handled in caller. */
- } else {
- int32_t displacement = static_cast<int32_t>(found_dex_pc) - static_cast<int32_t>(dex_pc);
- ADVANCE(displacement);
- }
- }
-
-// Create alternative instruction handlers dedicated to instrumentation.
-// Return instructions must not call Instrumentation::DexPcMovedEvent since they already call
-// Instrumentation::MethodExited. This is to avoid posting debugger events twice for this location.
-// Note: we do not use the kReturn instruction flag here (to test the instruction is a return). The
-// compiler seems to not evaluate "(Instruction::FlagsOf(Instruction::code) & kReturn) != 0" to
-// a constant condition that would remove the "if" statement so the test is free.
-#define INSTRUMENTATION_INSTRUCTION_HANDLER(o, code, n, f, i, a, v) \
- alt_op_##code: { \
- if (UNLIKELY(instrumentation->HasDexPcListeners())) { \
- Object* this_object = shadow_frame.GetThisObject(code_item->ins_size_); \
- instrumentation->DexPcMovedEvent(self, this_object, shadow_frame.GetMethod(), dex_pc); \
- } \
- UPDATE_HANDLER_TABLE(); \
- goto *handlersTable[instrumentation::kMainHandlerTable][Instruction::code]; \
- }
-#include "dex_instruction_list.h"
- DEX_INSTRUCTION_LIST(INSTRUMENTATION_INSTRUCTION_HANDLER)
-#undef DEX_INSTRUCTION_LIST
-#undef INSTRUMENTATION_INSTRUCTION_HANDLER
-} // NOLINT(readability/fn_size)
-
-// Explicit definitions of ExecuteGotoImpl.
-template HOT_ATTR
-JValue ExecuteGotoImpl<true, false>(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
-template HOT_ATTR
-JValue ExecuteGotoImpl<false, false>(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
-template
-JValue ExecuteGotoImpl<true, true>(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
-template
-JValue ExecuteGotoImpl<false, true>(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
-
-#else
-
-template<bool do_access_check, bool transaction_active>
-JValue ExecuteGotoImpl(Thread*, const DexFile::CodeItem*, ShadowFrame&, JValue) {
- LOG(FATAL) << "UNREACHABLE";
- UNREACHABLE();
-}
-// Explicit definitions of ExecuteGotoImpl.
-template<>
-JValue ExecuteGotoImpl<true, false>(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
-template<>
-JValue ExecuteGotoImpl<false, false>(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
-template<>
-JValue ExecuteGotoImpl<true, true>(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
-template<>
-JValue ExecuteGotoImpl<false, true>(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
-#endif
-
-} // namespace interpreter
-} // namespace art
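
The file removed above is the computed-goto ("threaded") build of the interpreter: each opcode handler is a label, dispatch jumps through a table of label addresses, and the INSTRUMENTATION_INSTRUCTION_HANDLER macro generates a parallel alt_op_* table whose handlers post DexPcMoved events before jumping back into the main table. A minimal sketch of that two-table dispatch scheme follows; it relies on the GCC/Clang labels-as-values extension, and the opcode set and handler bodies are invented for illustration, not ART's.

// Minimal sketch of two-table computed-goto dispatch, in the spirit of the
// removed ExecuteGotoImpl. Opcodes and "instrumentation" action are made up.
#include <cstdint>
#include <cstdio>

enum Opcode : uint8_t { OP_CONST = 0, OP_ADD = 1, OP_RETURN = 2, kNumOpcodes };

int32_t Interpret(const uint8_t* code, bool instrument) {
  // Main handlers do the work; alt handlers report the pc first, then jump
  // into the main table, like the generated alt_op_* handlers above.
  const void* const main_table[kNumOpcodes] = { &&op_const, &&op_add, &&op_return };
  const void* const alt_table[kNumOpcodes]  = { &&alt_const, &&alt_add, &&alt_return };
  const void* const* table = instrument ? alt_table : main_table;

  int32_t acc = 0;
  size_t pc = 0;
  goto *table[code[pc]];

 op_const:  acc  = static_cast<int8_t>(code[++pc]); ++pc; goto *table[code[pc]];
 op_add:    acc += static_cast<int8_t>(code[++pc]); ++pc; goto *table[code[pc]];
 op_return: return acc;

 alt_const:  std::printf("pc=%zu\n", pc); goto *main_table[code[pc]];
 alt_add:    std::printf("pc=%zu\n", pc); goto *main_table[code[pc]];
 alt_return: std::printf("pc=%zu\n", pc); goto *main_table[code[pc]];
}

int main() {
  const uint8_t program[] = { OP_CONST, 40, OP_ADD, 2, OP_RETURN };
  std::printf("%d\n", Interpret(program, /*instrument=*/true));  // pcs, then 42
}
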
diff --git a/runtime/interpreter/interpreter_goto_table_impl.h b/runtime/interpreter/interpreter_goto_table_impl.h
deleted file mode 100644
index bb9be88..0000000
--- a/runtime/interpreter/interpreter_goto_table_impl.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_GOTO_TABLE_IMPL_H_
-#define ART_RUNTIME_INTERPRETER_INTERPRETER_GOTO_TABLE_IMPL_H_
-
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "dex_file.h"
-#include "jvalue.h"
-
-namespace art {
-
-class ShadowFrame;
-class Thread;
-
-namespace interpreter {
-
-template<bool do_access_check, bool transaction_active>
-JValue ExecuteGotoImpl(Thread* self,
- const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame,
- JValue result_register) SHARED_REQUIRES(Locks::mutator_lock_);
-
-} // namespace interpreter
-} // namespace art
-
-#endif // ART_RUNTIME_INTERPRETER_INTERPRETER_GOTO_TABLE_IMPL_H_
diff --git a/runtime/interpreter/interpreter_mterp_impl.h b/runtime/interpreter/interpreter_mterp_impl.h
index 322df4e..90d9f89 100644
--- a/runtime/interpreter/interpreter_mterp_impl.h
+++ b/runtime/interpreter/interpreter_mterp_impl.h
@@ -33,7 +33,7 @@
extern "C" bool ExecuteMterpImpl(Thread* self,
const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame,
- JValue* result_register) SHARED_REQUIRES(Locks::mutator_lock_);
+ JValue* result_register) REQUIRES_SHARED(Locks::mutator_lock_);
} // namespace interpreter
} // namespace art
diff --git a/runtime/interpreter/interpreter_switch_impl.h b/runtime/interpreter/interpreter_switch_impl.h
index 90ec908..d0c9386 100644
--- a/runtime/interpreter/interpreter_switch_impl.h
+++ b/runtime/interpreter/interpreter_switch_impl.h
@@ -34,7 +34,7 @@
const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame,
JValue result_register,
- bool interpret_one_instruction) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool interpret_one_instruction) REQUIRES_SHARED(Locks::mutator_lock_);
} // namespace interpreter
} // namespace art
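
The two header hunks above rename the SHARED_REQUIRES(Locks::mutator_lock_) annotation to REQUIRES_SHARED(...), a change repeated throughout the rest of this patch. Both names wrap Clang thread-safety-analysis attributes so that -Wthread-safety can check that callers hold the mutator lock at least in shared mode. The sketch below shows how such a wrapper macro can be defined and applied; TSA, ReaderWriterMutex and g_mutator_lock are placeholder names and the attribute spelling is an assumption, not ART's actual definition.

// Illustrative thread-safety wrapper macro in the spirit of REQUIRES_SHARED.
#if defined(__clang__)
#define TSA(x) __attribute__((x))
#else
#define TSA(x)  // The analysis is Clang-only; elsewhere the macro vanishes.
#endif

#define REQUIRES_SHARED(...) TSA(requires_shared_capability(__VA_ARGS__))

class TSA(capability("mutex")) ReaderWriterMutex {
 public:
  void SharedLock() TSA(acquire_shared_capability()) {}
  void SharedUnlock() TSA(release_shared_capability()) {}
};

ReaderWriterMutex g_mutator_lock;

// With -Wthread-safety, Clang warns if a caller reaches this function
// without holding g_mutator_lock at least in shared mode.
int ReadHeapWord(const int* address) REQUIRES_SHARED(g_mutator_lock) {
  return *address;
}

int main() {
  int word = 42;
  g_mutator_lock.SharedLock();
  int value = ReadHeapWord(&word);  // OK: shared capability is held here.
  g_mutator_lock.SharedUnlock();
  return value == 42 ? 0 : 1;
}
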
diff --git a/runtime/interpreter/mterp/mips64/footer.S b/runtime/interpreter/mterp/mips64/footer.S
index 9994169..4063162 100644
--- a/runtime/interpreter/mterp/mips64/footer.S
+++ b/runtime/interpreter/mterp/mips64/footer.S
@@ -217,7 +217,8 @@
b MterpDone
/*
* Returned value is expected in a0 and if it's not 64-bit, the 32 most
- * significant bits of a0 must be 0.
+ * significant bits of a0 must be filled by sign- or zero-extending
+ * the value, depending on the return type.
*/
MterpReturn:
ld a2, OFF_FP_RESULT_REGISTER(rFP)
diff --git a/runtime/interpreter/mterp/mips64/op_return.S b/runtime/interpreter/mterp/mips64/op_return.S
index ec986b8..b10c03f 100644
--- a/runtime/interpreter/mterp/mips64/op_return.S
+++ b/runtime/interpreter/mterp/mips64/op_return.S
@@ -1,7 +1,8 @@
+%default {"instr":"GET_VREG"}
/*
* Return a 32-bit value.
*
- * for: return, return-object
+ * for: return (sign-extend), return-object (zero-extend)
*/
/* op vAA */
.extern MterpThreadFenceForConstructor
@@ -14,5 +15,5 @@
jal MterpSuspendCheck # (self)
1:
srl a2, rINST, 8 # a2 <- AA
- GET_VREG_U a0, a2 # a0 <- vAA
+ $instr a0, a2 # a0 <- vAA
b MterpReturn
diff --git a/runtime/interpreter/mterp/mips64/op_return_object.S b/runtime/interpreter/mterp/mips64/op_return_object.S
index 67f1871..b69b880 100644
--- a/runtime/interpreter/mterp/mips64/op_return_object.S
+++ b/runtime/interpreter/mterp/mips64/op_return_object.S
@@ -1 +1 @@
-%include "mips64/op_return.S"
+%include "mips64/op_return.S" {"instr":"GET_VREG_U"}
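
The two hunks above split the mips64 32-bit return path: plain return now reads the vreg with GET_VREG, a sign-extending load, while return-object keeps GET_VREG_U, a zero-extending one, because the caller consumes a full 64-bit value from a0. Independent of the assembly, the difference between the two widenings looks like this in C++ (the vreg bit pattern is made up):

#include <cstdint>
#include <cstdio>

int main() {
  // A 32-bit vreg whose bit pattern is 0xFFFFFFFE: read as an int it is -2,
  // read as a (32-bit) reference it is just an unsigned address-like value.
  uint32_t vreg = 0xFFFFFFFEu;

  // return: primitive result, widen with sign extension (GET_VREG).
  int64_t as_int = static_cast<int32_t>(vreg);

  // return-object: reference result, widen with zero extension (GET_VREG_U).
  uint64_t as_ref = vreg;

  std::printf("sign-extended: 0x%016llx (%lld)\n",
              static_cast<unsigned long long>(as_int),
              static_cast<long long>(as_int));
  std::printf("zero-extended: 0x%016llx\n",
              static_cast<unsigned long long>(as_ref));
  // sign-extended: 0xfffffffffffffffe (-2)
  // zero-extended: 0x00000000fffffffe
}
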
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 20a0753..a8c7d15 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -143,7 +143,7 @@
}
extern "C" size_t MterpShouldSwitchInterpreters()
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const instrumentation::Instrumentation* const instrumentation =
Runtime::Current()->GetInstrumentation();
return instrumentation->NonJitProfilingActive() || Dbg::IsDebuggerActive();
@@ -154,7 +154,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kVirtual, false, false>(
@@ -165,7 +165,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kSuper, false, false>(
@@ -176,7 +176,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kInterface, false, false>(
@@ -187,7 +187,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kDirect, false, false>(
@@ -198,7 +198,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kStatic, false, false>(
@@ -209,7 +209,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kVirtual, true, false>(
@@ -220,7 +220,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kSuper, true, false>(
@@ -231,7 +231,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kInterface, true, false>(
@@ -242,7 +242,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kDirect, true, false>(
@@ -253,7 +253,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kStatic, true, false>(
@@ -264,7 +264,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvokeVirtualQuick<false>(
@@ -275,7 +275,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvokeVirtualQuick<true>(
@@ -290,7 +290,7 @@
uint32_t tgt_vreg,
ShadowFrame* shadow_frame,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
String* s = ResolveString(self, *shadow_frame, index);
if (UNLIKELY(s == nullptr)) {
return true;
@@ -303,7 +303,7 @@
uint32_t tgt_vreg,
ShadowFrame* shadow_frame,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Class* c = ResolveVerifyAndClinit(index, shadow_frame->GetMethod(), self, false, false);
if (UNLIKELY(c == nullptr)) {
return true;
@@ -316,7 +316,7 @@
StackReference<mirror::Object>* vreg_addr,
art::ArtMethod* method,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Class* c = ResolveVerifyAndClinit(index, method, self, false, false);
if (UNLIKELY(c == nullptr)) {
return true;
@@ -334,7 +334,7 @@
StackReference<mirror::Object>* vreg_addr,
art::ArtMethod* method,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Class* c = ResolveVerifyAndClinit(index, method, self, false, false);
if (UNLIKELY(c == nullptr)) {
return false; // Caller will check for pending exception. Return value unimportant.
@@ -345,12 +345,12 @@
}
extern "C" size_t MterpFillArrayData(Object* obj, const Instruction::ArrayDataPayload* payload)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return FillArrayData(obj, payload);
}
extern "C" size_t MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint32_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
Object* obj = nullptr;
Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame->GetMethod(),
@@ -375,7 +375,7 @@
extern "C" size_t MterpSputObject(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
uint32_t inst_data, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, false, false>
(self, *shadow_frame, inst, inst_data);
@@ -385,7 +385,7 @@
uint16_t* dex_pc_ptr,
uint32_t inst_data,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, false, false>
(self, *shadow_frame, inst, inst_data);
@@ -394,7 +394,7 @@
extern "C" size_t MterpIputObjectQuick(ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint32_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoIPutQuick<Primitive::kPrimNot, false>(*shadow_frame, inst, inst_data);
}
@@ -402,7 +402,7 @@
extern "C" size_t MterpAputObject(ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint32_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
Object* a = shadow_frame->GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == nullptr)) {
@@ -421,7 +421,7 @@
extern "C" size_t MterpFilledNewArray(ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoFilledNewArray<false, false, false>(inst, *shadow_frame, self,
shadow_frame->GetResultRegister());
@@ -430,7 +430,7 @@
extern "C" size_t MterpFilledNewArrayRange(ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoFilledNewArray<true, false, false>(inst, *shadow_frame, self,
shadow_frame->GetResultRegister());
@@ -439,7 +439,7 @@
extern "C" size_t MterpNewArray(ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint32_t inst_data, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
int32_t length = shadow_frame->GetVReg(inst->VRegB_22c(inst_data));
Object* obj = AllocArrayFromCode<false, true>(
@@ -453,7 +453,7 @@
}
extern "C" size_t MterpHandleException(Thread* self, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(self->IsExceptionPending());
const instrumentation::Instrumentation* const instrumentation =
Runtime::Current()->GetInstrumentation();
@@ -469,7 +469,7 @@
}
extern "C" void MterpCheckBefore(Thread* self, ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
uint16_t inst_data = inst->Fetch16(0);
if (inst->Opcode(inst_data) == Instruction::MOVE_EXCEPTION) {
@@ -488,7 +488,7 @@
}
extern "C" void MterpLogDivideByZeroException(Thread* self, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -496,7 +496,7 @@
}
extern "C" void MterpLogArrayIndexException(Thread* self, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -504,7 +504,7 @@
}
extern "C" void MterpLogNegativeArraySizeException(Thread* self, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -512,7 +512,7 @@
}
extern "C" void MterpLogNoSuchMethodException(Thread* self, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -520,7 +520,7 @@
}
extern "C" void MterpLogExceptionThrownException(Thread* self, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -528,7 +528,7 @@
}
extern "C" void MterpLogNullObjectException(Thread* self, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -536,7 +536,7 @@
}
extern "C" void MterpLogFallback(Thread* self, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -545,7 +545,7 @@
}
extern "C" void MterpLogOSR(Thread* self, ShadowFrame* shadow_frame, int32_t offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -553,7 +553,7 @@
}
extern "C" void MterpLogSuspendFallback(Thread* self, ShadowFrame* shadow_frame, uint32_t flags)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -565,7 +565,7 @@
}
extern "C" size_t MterpSuspendCheck(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
self->AllowThreadSuspension();
return MterpShouldSwitchInterpreters();
}
@@ -574,7 +574,7 @@
ArtMethod* referrer,
uint64_t* new_value,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t));
if (LIKELY(field != nullptr)) {
@@ -595,7 +595,7 @@
mirror::Object* obj,
uint8_t new_value,
ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int8_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
Primitive::Type type = field->GetTypeAsPrimitiveType();
@@ -614,7 +614,7 @@
mirror::Object* obj,
uint16_t new_value,
ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
sizeof(int16_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -634,7 +634,7 @@
mirror::Object* obj,
uint32_t new_value,
ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
sizeof(int32_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -648,7 +648,7 @@
mirror::Object* obj,
uint64_t* new_value,
ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
sizeof(int64_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -662,7 +662,7 @@
mirror::Object* obj,
mirror::Object* new_value,
ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -673,7 +673,7 @@
}
extern "C" mirror::Object* artAGetObjectFromMterp(mirror::Object* arr, int32_t index)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(arr == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
return nullptr;
@@ -687,7 +687,7 @@
}
extern "C" mirror::Object* artIGetObjectFromMterp(mirror::Object* obj, uint32_t field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
return nullptr;
@@ -702,7 +702,7 @@
* and regenerated following batch updates.
*/
extern "C" ssize_t MterpSetUpHotnessCountdown(ArtMethod* method, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
uint16_t hotness_count = method->GetCounter();
int32_t countdown_value = jit::kJitHotnessDisabled;
jit::Jit* jit = Runtime::Current()->GetJit();
@@ -742,7 +742,7 @@
extern "C" ssize_t MterpAddHotnessBatch(ArtMethod* method,
ShadowFrame* shadow_frame,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
jit::Jit* jit = Runtime::Current()->GetJit();
if (jit != nullptr) {
int16_t count = shadow_frame->GetCachedHotnessCountdown() - shadow_frame->GetHotnessCountdown();
@@ -753,7 +753,7 @@
// TUNING: Unused by arm/arm64/x86/x86_64. Remove when mips/mips64 mterps support batch updates.
extern "C" size_t MterpProfileBranch(Thread* self, ShadowFrame* shadow_frame, int32_t offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = shadow_frame->GetMethod();
JValue* result = shadow_frame->GetResultRegister();
uint32_t dex_pc = shadow_frame->GetDexPC();
@@ -772,7 +772,7 @@
extern "C" size_t MterpMaybeDoOnStackReplacement(Thread* self,
ShadowFrame* shadow_frame,
int32_t offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = shadow_frame->GetMethod();
JValue* result = shadow_frame->GetResultRegister();
uint32_t dex_pc = shadow_frame->GetDexPC();
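
MterpSetUpHotnessCountdown and MterpAddHotnessBatch above (only their annotations change in this patch) are the batch half of JIT hotness counting: the interpreter fast path only decrements a per-frame countdown, and the difference against the cached starting value is later folded back into the method's counter in one step. A much-simplified model of that scheme, with invented field names and thresholds:

#include <cstdint>
#include <cstdio>

// Simplified model of batched hotness counting; not ART's real data layout.
struct Method { uint16_t hotness = 0; };

struct Frame {
  int16_t cached_countdown = 0;  // Countdown handed out at the last refill.
  int16_t countdown = 0;         // Decremented cheaply on each back-edge.
};

constexpr int16_t kBatch = 64;        // Refill granularity.
constexpr uint16_t kCompileAt = 512;  // Pretend "method is hot" threshold.

void SetUpCountdown(Frame& f) {
  f.cached_countdown = kBatch;
  f.countdown = kBatch;
}

bool AddBatch(Method& m, Frame& f) {
  // Fold the "cached minus current" difference back into the method counter.
  m.hotness += static_cast<uint16_t>(f.cached_countdown - f.countdown);
  SetUpCountdown(f);
  return m.hotness >= kCompileAt;  // Caller would kick off JIT compilation.
}

int main() {
  Method m;
  Frame f;
  SetUpCountdown(f);
  for (int i = 0; i < 1000; ++i) {  // Simulate back-edges in a hot loop.
    if (--f.countdown <= 0 && AddBatch(m, f)) {
      std::printf("hot after %d back-edges (hotness=%u)\n",
                  i + 1, static_cast<unsigned>(m.hotness));
      break;
    }
  }
}
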
diff --git a/runtime/interpreter/mterp/mterp_stub.cc b/runtime/interpreter/mterp/mterp_stub.cc
index 7e7337e..35f8f1c 100644
--- a/runtime/interpreter/mterp/mterp_stub.cc
+++ b/runtime/interpreter/mterp/mterp_stub.cc
@@ -40,7 +40,7 @@
*/
extern "C" bool ExecuteMterpImpl(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame, JValue* result_register)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self); UNUSED(shadow_frame); UNUSED(code_item); UNUSED(result_register);
UNIMPLEMENTED(art::FATAL);
return false;
diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S
index a061f1e..88e972f 100644
--- a/runtime/interpreter/mterp/out/mterp_mips64.S
+++ b/runtime/interpreter/mterp/out/mterp_mips64.S
@@ -651,7 +651,7 @@
/*
* Return a 32-bit value.
*
- * for: return, return-object
+ * for: return (sign-extend), return-object (zero-extend)
*/
/* op vAA */
.extern MterpThreadFenceForConstructor
@@ -664,7 +664,7 @@
jal MterpSuspendCheck # (self)
1:
srl a2, rINST, 8 # a2 <- AA
- GET_VREG_U a0, a2 # a0 <- vAA
+ GET_VREG a0, a2 # a0 <- vAA
b MterpReturn
/* ------------------------------ */
@@ -697,7 +697,7 @@
/*
* Return a 32-bit value.
*
- * for: return, return-object
+ * for: return (sign-extend), return-object (zero-extend)
*/
/* op vAA */
.extern MterpThreadFenceForConstructor
@@ -710,7 +710,7 @@
jal MterpSuspendCheck # (self)
1:
srl a2, rINST, 8 # a2 <- AA
- GET_VREG_U a0, a2 # a0 <- vAA
+ GET_VREG_U a0, a2 # a0 <- vAA
b MterpReturn
@@ -12298,7 +12298,8 @@
b MterpDone
/*
* Returned value is expected in a0 and if it's not 64-bit, the 32 most
- * significant bits of a0 must be 0.
+ * significant bits of a0 must be filled by sign- or zero-extending
+ * the value, depending on the return type.
*/
MterpReturn:
ld a2, OFF_FP_RESULT_REGISTER(rFP)
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index a0e0e62..c614408 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -57,7 +57,7 @@
static void AbortTransactionOrFail(Thread* self, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void AbortTransactionOrFail(Thread* self, const char* fmt, ...) {
va_list args;
@@ -81,7 +81,7 @@
ShadowFrame* shadow_frame,
JValue* result,
size_t arg_offset,
- bool to_lower_case) SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool to_lower_case) REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t int_value = static_cast<uint32_t>(shadow_frame->GetVReg(arg_offset));
// Only ASCII (7-bit).
@@ -117,7 +117,7 @@
Handle<mirror::ClassLoader> class_loader, JValue* result,
const std::string& method_name, bool initialize_class,
bool abort_if_not_found)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(className.Get() != nullptr);
std::string descriptor(DotToDescriptor(className->ToModifiedUtf8().c_str()));
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -147,7 +147,7 @@
// actually the transaction abort exception. This must not be wrapped, as it signals an
// initialization abort.
static void CheckExceptionGenerateClassNotFound(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (self->IsExceptionPending()) {
// If it is not the transaction abort exception, wrap it.
std::string type(PrettyTypeOf(self->GetException()));
@@ -159,7 +159,7 @@
}
static mirror::String* GetClassName(Thread* self, ShadowFrame* shadow_frame, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* param = shadow_frame->GetVRegReference(arg_offset);
if (param == nullptr) {
AbortTransactionOrFail(self, "Null-pointer in Class.forName.");
@@ -442,7 +442,7 @@
static void GetResourceAsStream(Thread* self,
ShadowFrame* shadow_frame,
JValue* result,
- size_t arg_offset) SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t arg_offset) REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* resource_obj = shadow_frame->GetVRegReference(arg_offset + 1);
if (resource_obj == nullptr) {
AbortTransactionOrFail(self, "null name for getResourceAsStream");
@@ -604,7 +604,7 @@
mirror::Array* src_array, int32_t src_pos,
mirror::Array* dst_array, int32_t dst_pos,
int32_t length)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (src_array->GetClass()->GetComponentType() != dst_array->GetClass()->GetComponentType()) {
AbortTransactionOrFail(self, "Types mismatched in arraycopy: %s vs %s.",
PrettyDescriptor(src_array->GetClass()->GetComponentType()).c_str(),
@@ -748,7 +748,7 @@
JValue* result,
size_t arg_offset,
bool is_default_version)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<4> hs(self);
Handle<mirror::String> h_key(
hs.NewHandle(reinterpret_cast<mirror::String*>(shadow_frame->GetVRegReference(arg_offset))));
@@ -915,7 +915,7 @@
}
static mirror::Object* GetDexFromDexCache(Thread* self, mirror::DexCache* dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile* dex_file = dex_cache->GetDexFile();
if (dex_file == nullptr) {
return nullptr;
@@ -1026,7 +1026,7 @@
static void UnstartedMemoryPeekArray(
Primitive::Type type, Thread* self, ShadowFrame* shadow_frame, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
int64_t address_long = shadow_frame->GetVRegLong(arg_offset);
mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 2);
if (obj == nullptr) {
@@ -1173,7 +1173,7 @@
// This allows getting the char array for new style of String objects during compilation.
void UnstartedRuntime::UnstartedStringToCharArray(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::String* string = shadow_frame->GetVRegReference(arg_offset)->AsString();
if (string == nullptr) {
AbortTransactionOrFail(self, "String.charAt with null object");
@@ -1299,7 +1299,7 @@
void UnstartedRuntime::UnstartedUnsafeGetObjectVolatile(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Argument 0 is the Unsafe instance, skip.
mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
if (obj == nullptr) {
@@ -1313,7 +1313,7 @@
void UnstartedRuntime::UnstartedUnsafePutObjectVolatile(
Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Argument 0 is the Unsafe instance, skip.
mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
if (obj == nullptr) {
@@ -1331,7 +1331,7 @@
void UnstartedRuntime::UnstartedUnsafePutOrderedObject(
Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Argument 0 is the Unsafe instance, skip.
mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
if (obj == nullptr) {
@@ -1352,7 +1352,7 @@
// of correctly handling the corner cases.
void UnstartedRuntime::UnstartedIntegerParseInt(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset);
if (obj == nullptr) {
AbortTransactionOrFail(self, "Cannot parse null string, retry at runtime.");
@@ -1396,7 +1396,7 @@
// well.
void UnstartedRuntime::UnstartedLongParseLong(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset);
if (obj == nullptr) {
AbortTransactionOrFail(self, "Cannot parse null string, retry at runtime.");
@@ -1437,7 +1437,7 @@
void UnstartedRuntime::UnstartedMethodInvoke(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JNIEnvExt* env = self->GetJniEnv();
ScopedObjectAccessUnchecked soa(self);
diff --git a/runtime/interpreter/unstarted_runtime.h b/runtime/interpreter/unstarted_runtime.h
index 03d7026..3f36a27 100644
--- a/runtime/interpreter/unstarted_runtime.h
+++ b/runtime/interpreter/unstarted_runtime.h
@@ -52,14 +52,14 @@
ShadowFrame* shadow_frame,
JValue* result,
size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void Jni(Thread* self,
ArtMethod* method,
mirror::Object* receiver,
uint32_t* args,
JValue* result)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
// Methods that intercept available libcore implementations.
@@ -68,7 +68,7 @@
ShadowFrame* shadow_frame, \
JValue* result, \
size_t arg_offset) \
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
#include "unstarted_runtime_list.h"
UNSTARTED_RUNTIME_DIRECT_LIST(UNSTARTED_DIRECT)
#undef UNSTARTED_RUNTIME_DIRECT_LIST
@@ -82,7 +82,7 @@
mirror::Object* receiver, \
uint32_t* args, \
JValue* result) \
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
#include "unstarted_runtime_list.h"
UNSTARTED_RUNTIME_JNI_LIST(UNSTARTED_JNI)
#undef UNSTARTED_RUNTIME_DIRECT_LIST
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index c324600..ba751ec 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -49,7 +49,7 @@
ShadowFrame* shadow_frame, \
JValue* result, \
size_t arg_offset) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
interpreter::UnstartedRuntime::Unstarted ## Name(self, shadow_frame, result, arg_offset); \
}
#include "unstarted_runtime_list.h"
@@ -65,7 +65,7 @@
mirror::Object* receiver, \
uint32_t* args, \
JValue* result) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
interpreter::UnstartedRuntime::UnstartedJNI ## Name(self, method, receiver, args, result); \
}
#include "unstarted_runtime_list.h"
@@ -83,7 +83,7 @@
Thread* self,
mirror::Class* component_type,
const StackHandleScope<3>& data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
mirror::Class* array_type = runtime->GetClassLinker()->FindArrayClass(self, &component_type);
CHECK(array_type != nullptr);
@@ -99,7 +99,7 @@
static void CheckObjectArray(mirror::ObjectArray<mirror::Object>* array,
const StackHandleScope<3>& data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK_EQ(array->GetLength(), 3);
CHECK_EQ(data.NumberOfReferences(), 3U);
for (size_t i = 0; i < 3; ++i) {
@@ -115,7 +115,7 @@
mirror::ObjectArray<mirror::Object>* dst,
int32_t dst_pos,
int32_t length)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue result;
tmp->SetVRegReference(0, src);
tmp->SetVReg(1, src_pos);
@@ -141,7 +141,7 @@
int32_t dst_pos,
int32_t length,
const StackHandleScope<3>& expected_result)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<3> hs_misc(self);
Handle<mirror::Class> dst_component_handle(hs_misc.NewHandle(dst_component_class));
@@ -167,7 +167,7 @@
ShadowFrame* tmp,
double const test_pairs[][2],
size_t num_pairs)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < num_pairs; ++i) {
tmp->SetVRegDouble(0, test_pairs[i][0]);
@@ -189,7 +189,7 @@
// Prepare for aborts. Aborts assume that the exception class is already resolved, as the
// loading code doesn't work under transactions.
- void PrepareForAborts() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void PrepareForAborts() REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* result = Runtime::Current()->GetClassLinker()->FindClass(
Thread::Current(),
Transaction::kAbortExceptionSignature,
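Reviewer note: the SHARED_REQUIRES -> REQUIRES_SHARED change above, and in every remaining hunk of this patch, is a pure rename of the shared-capability annotation macro; no locking behavior changes. The macro definitions themselves are not part of this diff, so the sketch below is only an illustration of how such a macro is commonly spelled on top of Clang's thread-safety attributes, not the actual ART definition.

    // Illustrative sketch only -- the real definition is elsewhere in the tree
    // and is not shown in this diff.
    #if defined(__clang__)
    #define REQUIRES_SHARED(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
    #else
    #define REQUIRES_SHARED(...)
    #endif

    // A declaration annotated this way must be reached with the named lock held
    // for reading, matching the declarations touched above, e.g.:
    //   static mirror::String* GetClassName(Thread* self, ShadowFrame* frame, size_t arg_offset)
    //       REQUIRES_SHARED(Locks::mutator_lock_);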
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 2401bec..979495a 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -234,7 +234,7 @@
// See section 11.3 "Linking Native Methods" of the JNI spec.
void* FindNativeMethod(ArtMethod* m, std::string& detail)
REQUIRES(Locks::jni_libraries_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::string jni_short_name(JniShortName(m));
std::string jni_long_name(JniLongName(m));
mirror::ClassLoader* const declaring_class_loader = m->GetDeclaringClass()->GetClassLoader();
@@ -273,7 +273,7 @@
// Unload native libraries with cleared class loaders.
void UnloadNativeLibraries()
REQUIRES(!Locks::jni_libraries_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedObjectAccessUnchecked soa(Thread::Current());
std::vector<SharedLibrary*> unload_libraries;
{
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index ed9d3ab..a10a72f 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -81,7 +81,7 @@
// such as NewByteArray.
// If -verbose:third-party-jni is on, we want to log any JNI function calls
// made by a third-party native method.
- bool ShouldTrace(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool ShouldTrace(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
/**
* Loads the given shared library. 'path' is an absolute pathname.
@@ -98,67 +98,67 @@
// Unload native libraries with cleared class loaders.
void UnloadNativeLibraries()
REQUIRES(!Locks::jni_libraries_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/**
* Returns a pointer to the code for the native method 'm', found
* using dlsym(3) on every native library that's been loaded so far.
*/
void* FindCodeForNativeMethod(ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void DumpForSigQuit(std::ostream& os)
REQUIRES(!Locks::jni_libraries_lock_, !globals_lock_, !weak_globals_lock_);
void DumpReferenceTables(std::ostream& os)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!globals_lock_, !weak_globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!globals_lock_, !weak_globals_lock_);
bool SetCheckJniEnabled(bool enabled);
- void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_)
+ void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!globals_lock_);
- void DisallowNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
- void AllowNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
- void BroadcastForNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_)
+ void DisallowNewWeakGlobals() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
+ void AllowNewWeakGlobals() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
+ void BroadcastForNewWeakGlobals() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!weak_globals_lock_);
jobject AddGlobalRef(Thread* self, mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!globals_lock_);
jweak AddWeakGlobalRef(Thread* self, mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
void DeleteGlobalRef(Thread* self, jobject obj) REQUIRES(!globals_lock_);
void DeleteWeakGlobalRef(Thread* self, jweak obj) REQUIRES(!weak_globals_lock_);
void SweepJniWeakGlobals(IsMarkedVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
mirror::Object* DecodeGlobal(IndirectRef ref)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void UpdateGlobal(Thread* self, IndirectRef ref, mirror::Object* result)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!globals_lock_);
mirror::Object* DecodeWeakGlobal(Thread* self, IndirectRef ref)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!weak_globals_lock_);
mirror::Object* DecodeWeakGlobalLocked(Thread* self, IndirectRef ref)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(weak_globals_lock_);
// Like DecodeWeakGlobal() but to be used only during a runtime shutdown where self may be
// null.
mirror::Object* DecodeWeakGlobalDuringShutdown(Thread* self, IndirectRef ref)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!weak_globals_lock_);
// Checks if the weak global ref has been cleared by the GC without decode (read barrier.)
bool IsWeakGlobalCleared(Thread* self, IndirectRef ref)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!weak_globals_lock_);
Mutex& WeakGlobalsLock() RETURN_CAPABILITY(weak_globals_lock_) {
@@ -166,13 +166,13 @@
}
void UpdateWeakGlobal(Thread* self, IndirectRef ref, mirror::Object* result)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
const JNIInvokeInterface* GetUncheckedFunctions() const {
return unchecked_functions_;
}
- void TrimGlobals() SHARED_REQUIRES(Locks::mutator_lock_)
+ void TrimGlobals() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!globals_lock_);
jint HandleGetEnv(/*out*/void** env, jint version);
@@ -183,9 +183,9 @@
private:
// Return true if self can currently access weak globals.
- bool MayAccessWeakGlobalsUnlocked(Thread* self) const SHARED_REQUIRES(Locks::mutator_lock_);
+ bool MayAccessWeakGlobalsUnlocked(Thread* self) const REQUIRES_SHARED(Locks::mutator_lock_);
bool MayAccessWeakGlobals(Thread* self) const
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(weak_globals_lock_);
Runtime* const runtime_;
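Side note on the pattern repeated in this header: the shared requirement composes with exclusive and negative requirements on one declaration, so a method can demand the mutator lock for reading while forbidding another mutex. A minimal hedged example with placeholder names (not an ART declaration):

    // Placeholder class and member names, for illustration only.
    class WeakTable {
     public:
      // Caller must hold the mutator lock for reading and must not already hold lock_.
      mirror::Object* Decode(IndirectRef ref)
          REQUIRES_SHARED(Locks::mutator_lock_)
          REQUIRES(!lock_);

     private:
      Mutex lock_;
    };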
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index ae02fe6..e5d34e1 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -88,7 +88,7 @@
uint64_t dex_pc;
};
std::ostream& operator<<(std::ostream& os, const JdwpLocation& rhs)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool operator==(const JdwpLocation& lhs, const JdwpLocation& rhs);
bool operator!=(const JdwpLocation& lhs, const JdwpLocation& rhs);
@@ -186,7 +186,7 @@
* The VM has finished initializing. Only called when the debugger is
* connected at the time initialization completes.
*/
- void PostVMStart() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!jdwp_token_lock_);
+ void PostVMStart() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!jdwp_token_lock_);
/*
* A location of interest has been reached. This is used for breakpoints,
@@ -202,7 +202,7 @@
*/
void PostLocationEvent(const EventLocation* pLoc, mirror::Object* thisPtr, int eventFlags,
const JValue* returnValue)
- REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* A field of interest has been accessed or modified. This is used for field access and field
@@ -213,7 +213,7 @@
*/
void PostFieldEvent(const EventLocation* pLoc, ArtField* field, mirror::Object* thisPtr,
const JValue* fieldValue, bool is_modification)
- REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* An exception has been thrown.
@@ -222,19 +222,19 @@
*/
void PostException(const EventLocation* pThrowLoc, mirror::Throwable* exception_object,
const EventLocation* pCatchLoc, mirror::Object* thisPtr)
- REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* A thread has started or stopped.
*/
void PostThreadChange(Thread* thread, bool start)
- REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Class has been prepared.
*/
void PostClassPrepare(mirror::Class* klass)
- REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* The VM is about to stop.
@@ -242,7 +242,7 @@
bool PostVMDeath();
// Called if/when we realize we're talking to DDMS.
- void NotifyDdmsActive() SHARED_REQUIRES(Locks::mutator_lock_);
+ void NotifyDdmsActive() REQUIRES_SHARED(Locks::mutator_lock_);
void SetupChunkHeader(uint32_t type, size_t data_len, size_t header_size, uint8_t* out_header);
@@ -251,7 +251,7 @@
* Send up a chunk of DDM data.
*/
void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool HandlePacket() REQUIRES(!shutdown_lock_, !jdwp_token_lock_);
@@ -259,7 +259,7 @@
void ResetState()
REQUIRES(!event_list_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/* atomic ops to get next serial number */
uint32_t NextRequestSerial();
@@ -277,21 +277,21 @@
*/
JdwpError RegisterEvent(JdwpEvent* pEvent)
REQUIRES(!event_list_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Unregister an event, given the requestId.
*/
void UnregisterEventById(uint32_t requestId)
REQUIRES(!event_list_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Unregister all events.
*/
void UnregisterAll()
REQUIRES(!event_list_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
explicit JdwpState(const JdwpOptions* options);
@@ -303,18 +303,18 @@
REQUIRES(!Locks::mutator_lock_);
void SendRequestAndPossiblySuspend(ExpandBuf* pReq, JdwpSuspendPolicy suspend_policy,
ObjectId threadId)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!jdwp_token_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!jdwp_token_lock_);
void CleanupMatchList(const std::vector<JdwpEvent*>& match_list)
- REQUIRES(event_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void EventFinish(ExpandBuf* pReq);
bool FindMatchingEvents(JdwpEventKind eventKind, const ModBasket& basket,
std::vector<JdwpEvent*>* match_list)
- REQUIRES(!event_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void FindMatchingEventsLocked(JdwpEventKind eventKind, const ModBasket& basket,
std::vector<JdwpEvent*>* match_list)
- REQUIRES(event_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void UnregisterEvent(JdwpEvent* pEvent)
- REQUIRES(event_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void SendBufferedRequest(uint32_t type, const std::vector<iovec>& iov);
/*
@@ -410,9 +410,9 @@
bool processing_request_ GUARDED_BY(shutdown_lock_);
};
-std::string DescribeField(const FieldId& field_id) SHARED_REQUIRES(Locks::mutator_lock_);
-std::string DescribeMethod(const MethodId& method_id) SHARED_REQUIRES(Locks::mutator_lock_);
-std::string DescribeRefTypeId(const RefTypeId& ref_type_id) SHARED_REQUIRES(Locks::mutator_lock_);
+std::string DescribeField(const FieldId& field_id) REQUIRES_SHARED(Locks::mutator_lock_);
+std::string DescribeMethod(const MethodId& method_id) REQUIRES_SHARED(Locks::mutator_lock_);
+std::string DescribeRefTypeId(const RefTypeId& ref_type_id) REQUIRES_SHARED(Locks::mutator_lock_);
class Request {
public:
@@ -428,9 +428,9 @@
uint32_t ReadUnsigned32(const char* what);
- FieldId ReadFieldId() SHARED_REQUIRES(Locks::mutator_lock_);
+ FieldId ReadFieldId() REQUIRES_SHARED(Locks::mutator_lock_);
- MethodId ReadMethodId() SHARED_REQUIRES(Locks::mutator_lock_);
+ MethodId ReadMethodId() REQUIRES_SHARED(Locks::mutator_lock_);
ObjectId ReadObjectId(const char* specific_kind);
@@ -442,7 +442,7 @@
ObjectId ReadThreadGroupId();
- RefTypeId ReadRefTypeId() SHARED_REQUIRES(Locks::mutator_lock_);
+ RefTypeId ReadRefTypeId() REQUIRES_SHARED(Locks::mutator_lock_);
FrameId ReadFrameId();
@@ -456,7 +456,7 @@
JdwpTypeTag ReadTypeTag();
- JdwpLocation ReadLocation() SHARED_REQUIRES(Locks::mutator_lock_);
+ JdwpLocation ReadLocation() REQUIRES_SHARED(Locks::mutator_lock_);
JdwpModKind ReadModKind();
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 06b67b3..e2d29fe 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -447,7 +447,7 @@
* need to do this even if later mods cause us to ignore the event.
*/
static bool ModsMatch(JdwpEvent* pEvent, const ModBasket& basket)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JdwpEventMod* pMod = pEvent->mods;
for (int i = pEvent->modCount; i > 0; i--, pMod++) {
@@ -783,7 +783,7 @@
static void LogMatchingEventsAndThread(const std::vector<JdwpEvent*> match_list,
ObjectId thread_id)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0, e = match_list.size(); i < e; ++i) {
JdwpEvent* pEvent = match_list[i];
VLOG(jdwp) << "EVENT #" << i << ": " << pEvent->eventKind
@@ -799,7 +799,7 @@
static void SetJdwpLocationFromEventLocation(const JDWP::EventLocation* event_location,
JDWP::JdwpLocation* jdwp_location)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(event_location != nullptr);
DCHECK(jdwp_location != nullptr);
Dbg::SetJdwpLocation(jdwp_location, event_location->method, event_location->dex_pc);
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 6278ef0..f6008ac 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -54,7 +54,7 @@
}
static JdwpError WriteTaggedObject(ExpandBuf* reply, ObjectId object_id)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
uint8_t tag;
JdwpError rc = Dbg::GetObjectTag(object_id, &tag);
if (rc == ERR_NONE) {
@@ -65,7 +65,7 @@
}
static JdwpError WriteTaggedObjectList(ExpandBuf* reply, const std::vector<ObjectId>& objects)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
expandBufAdd4BE(reply, objects.size());
for (size_t i = 0; i < objects.size(); ++i) {
JdwpError rc = WriteTaggedObject(reply, objects[i]);
@@ -85,7 +85,7 @@
static JdwpError RequestInvoke(JdwpState*, Request* request,
ObjectId thread_id, ObjectId object_id,
RefTypeId class_id, MethodId method_id, bool is_constructor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(!is_constructor || object_id != 0);
int32_t arg_count = request->ReadSigned32("argument count");
@@ -124,7 +124,7 @@
}
static JdwpError VM_Version(JdwpState*, Request*, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Text information on runtime version.
std::string version(StringPrintf("Android Runtime %s", Runtime::Current()->GetVersion()));
expandBufAddUtf8String(pReply, version);
@@ -148,7 +148,7 @@
* been loaded by multiple class loaders.
*/
static JdwpError VM_ClassesBySignature(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::string classDescriptor(request->ReadUtf8String());
std::vector<RefTypeId> ids;
@@ -180,7 +180,7 @@
* to be suspended, and that violates some JDWP expectations.
*/
static JdwpError VM_AllThreads(JdwpState*, Request*, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::vector<ObjectId> thread_ids;
Dbg::GetThreads(nullptr /* all thread groups */, &thread_ids);
@@ -196,7 +196,7 @@
* List all thread groups that do not have a parent.
*/
static JdwpError VM_TopLevelThreadGroups(JdwpState*, Request*, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
/*
* TODO: maintain a list of parentless thread groups in the VM.
*
@@ -215,7 +215,7 @@
* Respond with the sizes of the basic debugger types.
*/
static JdwpError VM_IDSizes(JdwpState*, Request*, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
expandBufAdd4BE(pReply, sizeof(FieldId));
expandBufAdd4BE(pReply, sizeof(MethodId));
expandBufAdd4BE(pReply, sizeof(ObjectId));
@@ -225,7 +225,7 @@
}
static JdwpError VM_Dispose(JdwpState*, Request*, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Dbg::Dispose();
return ERR_NONE;
}
@@ -237,7 +237,7 @@
* This needs to increment the "suspend count" on all threads.
*/
static JdwpError VM_Suspend(JdwpState*, Request*, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
ScopedThreadSuspension sts(self, kWaitingForDebuggerSuspension);
Dbg::SuspendVM();
@@ -248,13 +248,13 @@
* Resume execution. Decrements the "suspend count" of all threads.
*/
static JdwpError VM_Resume(JdwpState*, Request*, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Dbg::ResumeVM();
return ERR_NONE;
}
static JdwpError VM_Exit(JdwpState* state, Request* request, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t exit_status = request->ReadUnsigned32("exit_status");
state->ExitAfterReplying(exit_status);
return ERR_NONE;
@@ -267,7 +267,7 @@
* string "java.util.Arrays".)
*/
static JdwpError VM_CreateString(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::string str(request->ReadUtf8String());
ObjectId string_id;
JdwpError status = Dbg::CreateString(str, &string_id);
@@ -279,7 +279,7 @@
}
static JdwpError VM_ClassPaths(JdwpState*, Request*, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
expandBufAddUtf8String(pReply, "/");
std::vector<std::string> class_path;
@@ -300,7 +300,7 @@
}
static JdwpError VM_DisposeObjects(JdwpState*, Request* request, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
size_t object_count = request->ReadUnsigned32("object_count");
for (size_t i = 0; i < object_count; ++i) {
ObjectId object_id = request->ReadObjectId();
@@ -311,7 +311,7 @@
}
static JdwpError VM_Capabilities(JdwpState*, Request*, ExpandBuf* reply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
expandBufAdd1(reply, true); // canWatchFieldModification
expandBufAdd1(reply, true); // canWatchFieldAccess
expandBufAdd1(reply, true); // canGetBytecodes
@@ -323,7 +323,7 @@
}
static JdwpError VM_CapabilitiesNew(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// The first few capabilities are the same as those reported by the older call.
VM_Capabilities(nullptr, request, reply);
@@ -350,7 +350,7 @@
}
static JdwpError VM_AllClassesImpl(ExpandBuf* pReply, bool descriptor_and_status, bool generic)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::vector<JDWP::RefTypeId> classes;
Dbg::GetClassList(&classes);
@@ -381,17 +381,17 @@
}
static JdwpError VM_AllClasses(JdwpState*, Request*, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return VM_AllClassesImpl(pReply, true, false);
}
static JdwpError VM_AllClassesWithGeneric(JdwpState*, Request*, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return VM_AllClassesImpl(pReply, true, true);
}
static JdwpError VM_InstanceCounts(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
int32_t class_count = request->ReadSigned32("class count");
if (class_count < 0) {
return ERR_ILLEGAL_ARGUMENT;
@@ -415,7 +415,7 @@
}
static JdwpError RT_Modifiers(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
return Dbg::GetModifiers(refTypeId, pReply);
}
@@ -424,7 +424,7 @@
* Get values from static fields in a reference type.
*/
static JdwpError RT_GetValues(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
int32_t field_count = request->ReadSigned32("field count");
expandBufAdd4BE(pReply, field_count);
@@ -442,7 +442,7 @@
* Get the name of the source file in which a reference type was declared.
*/
static JdwpError RT_SourceFile(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
std::string source_file;
JdwpError status = Dbg::GetSourceFile(refTypeId, &source_file);
@@ -457,7 +457,7 @@
* Return the current status of the reference type.
*/
static JdwpError RT_Status(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
JDWP::JdwpTypeTag type_tag;
uint32_t class_status;
@@ -473,7 +473,7 @@
* Return interfaces implemented directly by this class.
*/
static JdwpError RT_Interfaces(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
return Dbg::OutputDeclaredInterfaces(refTypeId, pReply);
}
@@ -482,7 +482,7 @@
* Return the class object corresponding to this type.
*/
static JdwpError RT_ClassObject(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
ObjectId class_object_id;
JdwpError status = Dbg::GetClassObject(refTypeId, &class_object_id);
@@ -500,13 +500,13 @@
* JDB seems interested, but DEX files don't currently support this.
*/
static JdwpError RT_SourceDebugExtension(JdwpState*, Request*, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
/* referenceTypeId in, string out */
return ERR_ABSENT_INFORMATION;
}
static JdwpError RT_Signature(JdwpState*, Request* request, ExpandBuf* pReply, bool with_generic)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
std::string signature;
@@ -522,12 +522,12 @@
}
static JdwpError RT_Signature(JdwpState* state, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return RT_Signature(state, request, pReply, false);
}
static JdwpError RT_SignatureWithGeneric(JdwpState* state, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return RT_Signature(state, request, pReply, true);
}
@@ -536,7 +536,7 @@
* reference type, or null if it was loaded by the system loader.
*/
static JdwpError RT_ClassLoader(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
return Dbg::GetClassLoader(refTypeId, pReply);
}
@@ -546,14 +546,14 @@
* fields declared by a class.
*/
static JdwpError RT_FieldsWithGeneric(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
return Dbg::OutputDeclaredFields(refTypeId, true, pReply);
}
// Obsolete equivalent of FieldsWithGeneric, without the generic type information.
static JdwpError RT_Fields(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
return Dbg::OutputDeclaredFields(refTypeId, false, pReply);
}
@@ -563,20 +563,20 @@
* methods declared by a class.
*/
static JdwpError RT_MethodsWithGeneric(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
return Dbg::OutputDeclaredMethods(refTypeId, true, pReply);
}
// Obsolete equivalent of MethodsWithGeneric, without the generic type information.
static JdwpError RT_Methods(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
return Dbg::OutputDeclaredMethods(refTypeId, false, pReply);
}
static JdwpError RT_Instances(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
int32_t max_count = request->ReadSigned32("max count");
if (max_count < 0) {
@@ -596,7 +596,7 @@
* Return the immediate superclass of a class.
*/
static JdwpError CT_Superclass(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
RefTypeId superClassId;
JdwpError status = Dbg::GetSuperclass(class_id, &superClassId);
@@ -611,7 +611,7 @@
* Set static class values.
*/
static JdwpError CT_SetValues(JdwpState* , Request* request, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
int32_t values_count = request->ReadSigned32("values count");
@@ -641,7 +641,7 @@
*/
static JdwpError CT_InvokeMethod(JdwpState* state, Request* request,
ExpandBuf* pReply ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
ObjectId thread_id = request->ReadThreadId();
MethodId method_id = request->ReadMethodId();
@@ -658,7 +658,7 @@
*/
static JdwpError CT_NewInstance(JdwpState* state, Request* request,
ExpandBuf* pReply ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
ObjectId thread_id = request->ReadThreadId();
MethodId method_id = request->ReadMethodId();
@@ -675,7 +675,7 @@
* Create a new array object of the requested type and length.
*/
static JdwpError AT_newInstance(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId arrayTypeId = request->ReadRefTypeId();
int32_t length = request->ReadSigned32("length");
@@ -694,7 +694,7 @@
*/
static JdwpError IT_InvokeMethod(JdwpState* state, Request* request,
ExpandBuf* pReply ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
ObjectId thread_id = request->ReadThreadId();
MethodId method_id = request->ReadMethodId();
@@ -706,7 +706,7 @@
* Return line number information for the method, if present.
*/
static JdwpError M_LineTable(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
MethodId method_id = request->ReadMethodId();
@@ -717,7 +717,7 @@
static JdwpError M_VariableTable(JdwpState*, Request* request, ExpandBuf* pReply,
bool generic)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
MethodId method_id = request->ReadMethodId();
@@ -730,17 +730,17 @@
}
static JdwpError M_VariableTable(JdwpState* state, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return M_VariableTable(state, request, pReply, false);
}
static JdwpError M_VariableTableWithGeneric(JdwpState* state, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return M_VariableTable(state, request, pReply, true);
}
static JdwpError M_Bytecodes(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
MethodId method_id = request->ReadMethodId();
@@ -760,7 +760,7 @@
// Default implementation for IDEs relying on this command.
static JdwpError M_IsObsolete(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
request->ReadRefTypeId(); // unused reference type ID
request->ReadMethodId(); // unused method ID
expandBufAdd1(reply, false); // a method is never obsolete.
@@ -775,7 +775,7 @@
* passed in here.
*/
static JdwpError OR_ReferenceType(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
return Dbg::GetReferenceType(object_id, pReply);
}
@@ -784,7 +784,7 @@
* Get values from the fields of an object.
*/
static JdwpError OR_GetValues(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
int32_t field_count = request->ReadSigned32("field count");
@@ -804,7 +804,7 @@
* Set values in the fields of an object.
*/
static JdwpError OR_SetValues(JdwpState*, Request* request, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
int32_t field_count = request->ReadSigned32("field count");
@@ -826,7 +826,7 @@
}
static JdwpError OR_MonitorInfo(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
return Dbg::GetMonitorInfo(object_id, reply);
}
@@ -844,7 +844,7 @@
*/
static JdwpError OR_InvokeMethod(JdwpState* state, Request* request,
ExpandBuf* pReply ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
ObjectId thread_id = request->ReadThreadId();
RefTypeId class_id = request->ReadRefTypeId();
@@ -854,19 +854,19 @@
}
static JdwpError OR_DisableCollection(JdwpState*, Request* request, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
return Dbg::DisableCollection(object_id);
}
static JdwpError OR_EnableCollection(JdwpState*, Request* request, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
return Dbg::EnableCollection(object_id);
}
static JdwpError OR_IsCollected(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
bool is_collected;
JdwpError rc = Dbg::IsCollected(object_id, &is_collected);
@@ -875,7 +875,7 @@
}
static JdwpError OR_ReferringObjects(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
int32_t max_count = request->ReadSigned32("max count");
if (max_count < 0) {
@@ -895,7 +895,7 @@
* Return the string value in a string object.
*/
static JdwpError SR_Value(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId stringObject = request->ReadObjectId();
std::string str;
JDWP::JdwpError error = Dbg::StringToUtf8(stringObject, &str);
@@ -914,7 +914,7 @@
* Return a thread's name.
*/
static JdwpError TR_Name(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
std::string name;
@@ -935,7 +935,7 @@
* resume it; only the JDI is allowed to resume it.
*/
static JdwpError TR_Suspend(JdwpState*, Request* request, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
if (thread_id == Dbg::GetThreadSelfId()) {
@@ -953,7 +953,7 @@
* Resume the specified thread.
*/
static JdwpError TR_Resume(JdwpState*, Request* request, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
if (thread_id == Dbg::GetThreadSelfId()) {
@@ -969,7 +969,7 @@
* Return status of specified thread.
*/
static JdwpError TR_Status(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
JDWP::JdwpThreadStatus threadStatus;
@@ -991,7 +991,7 @@
* Return the thread group that the specified thread is a member of.
*/
static JdwpError TR_ThreadGroup(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
return Dbg::GetThreadGroup(thread_id, pReply);
}
@@ -1003,7 +1003,7 @@
* be THREAD_NOT_SUSPENDED.
*/
static JdwpError TR_Frames(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
uint32_t start_frame = request->ReadUnsigned32("start frame");
uint32_t length = request->ReadUnsigned32("length");
@@ -1035,7 +1035,7 @@
* Returns the #of frames on the specified thread, which must be suspended.
*/
static JdwpError TR_FrameCount(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
size_t frame_count;
@@ -1049,7 +1049,7 @@
}
static JdwpError TR_OwnedMonitors(Request* request, ExpandBuf* reply, bool with_stack_depths)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
std::vector<ObjectId> monitors;
@@ -1073,17 +1073,17 @@
}
static JdwpError TR_OwnedMonitors(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return TR_OwnedMonitors(request, reply, false);
}
static JdwpError TR_OwnedMonitorsStackDepthInfo(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return TR_OwnedMonitors(request, reply, true);
}
static JdwpError TR_CurrentContendedMonitor(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
ObjectId contended_monitor;
@@ -1095,7 +1095,7 @@
}
static JdwpError TR_Interrupt(JdwpState*, Request* request, ExpandBuf* reply ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
return Dbg::Interrupt(thread_id);
}
@@ -1107,7 +1107,7 @@
* its suspend count recently.)
*/
static JdwpError TR_DebugSuspendCount(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
return Dbg::GetThreadDebugSuspendCount(thread_id, pReply);
}
@@ -1118,7 +1118,7 @@
* The Eclipse debugger recognizes "main" and "system" as special.
*/
static JdwpError TGR_Name(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_group_id = request->ReadThreadGroupId();
return Dbg::GetThreadGroupName(thread_group_id, pReply);
}
@@ -1128,7 +1128,7 @@
* thread group.
*/
static JdwpError TGR_Parent(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_group_id = request->ReadThreadGroupId();
return Dbg::GetThreadGroupParent(thread_group_id, pReply);
}
@@ -1138,7 +1138,7 @@
* specified thread group.
*/
static JdwpError TGR_Children(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_group_id = request->ReadThreadGroupId();
return Dbg::GetThreadGroupChildren(thread_group_id, pReply);
}
@@ -1147,7 +1147,7 @@
* Return the #of components in the array.
*/
static JdwpError AR_Length(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId array_id = request->ReadArrayId();
int32_t length;
@@ -1166,7 +1166,7 @@
* Return the values from an array.
*/
static JdwpError AR_GetValues(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId array_id = request->ReadArrayId();
uint32_t offset = request->ReadUnsigned32("offset");
uint32_t length = request->ReadUnsigned32("length");
@@ -1177,7 +1177,7 @@
* Set values in an array.
*/
static JdwpError AR_SetValues(JdwpState*, Request* request, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId array_id = request->ReadArrayId();
uint32_t offset = request->ReadUnsigned32("offset");
uint32_t count = request->ReadUnsigned32("count");
@@ -1185,7 +1185,7 @@
}
static JdwpError CLR_VisibleClasses(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
request->ReadObjectId(); // classLoaderObject
// TODO: we should only return classes which have the given class loader as a defining or
// initiating loader. The former would be easy; the latter is hard, because we don't have
@@ -1206,7 +1206,7 @@
* Reply with a requestID.
*/
static JdwpError ER_Set(JdwpState* state, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JdwpEventKind event_kind = request->ReadEnum1<JdwpEventKind>("event kind");
JdwpSuspendPolicy suspend_policy = request->ReadEnum1<JdwpSuspendPolicy>("suspend policy");
int32_t modifier_count = request->ReadSigned32("modifier count");
@@ -1348,7 +1348,7 @@
}
static JdwpError ER_Clear(JdwpState* state, Request* request, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
request->ReadEnum1<JdwpEventKind>("event kind");
uint32_t requestId = request->ReadUnsigned32("request id");
@@ -1362,7 +1362,7 @@
* Return the values of arguments and local variables.
*/
static JdwpError SF_GetValues(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return Dbg::GetLocalValues(request, pReply);
}
@@ -1370,12 +1370,12 @@
* Set the values of arguments and local variables.
*/
static JdwpError SF_SetValues(JdwpState*, Request* request, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return Dbg::SetLocalValues(request);
}
static JdwpError SF_ThisObject(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
FrameId frame_id = request->ReadFrameId();
@@ -1396,7 +1396,7 @@
* that, or I have no idea what this is for.)
*/
static JdwpError COR_ReflectedType(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId class_object_id = request->ReadRefTypeId();
return Dbg::GetReflectedType(class_object_id, pReply);
}
@@ -1405,7 +1405,7 @@
* Handle a DDM packet with a single chunk in it.
*/
static JdwpError DDM_Chunk(JdwpState* state, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
state->NotifyDdmsActive();
uint8_t* replyBuf = nullptr;
int replyLen = -1;
diff --git a/runtime/jdwp/object_registry.cc b/runtime/jdwp/object_registry.cc
index 3fbad36..5989b61 100644
--- a/runtime/jdwp/object_registry.cc
+++ b/runtime/jdwp/object_registry.cc
@@ -63,12 +63,12 @@
// Explicit template instantiation.
template
-SHARED_REQUIRES(Locks::mutator_lock_)
+REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
JDWP::ObjectId ObjectRegistry::Add(Handle<mirror::Object> obj_h);
template
-SHARED_REQUIRES(Locks::mutator_lock_)
+REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
JDWP::ObjectId ObjectRegistry::Add(Handle<mirror::Throwable> obj_h);
diff --git a/runtime/jdwp/object_registry.h b/runtime/jdwp/object_registry.h
index 17490f4..7fa57c6 100644
--- a/runtime/jdwp/object_registry.h
+++ b/runtime/jdwp/object_registry.h
@@ -63,24 +63,24 @@
ObjectRegistry();
JDWP::ObjectId Add(mirror::Object* o)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_);
JDWP::RefTypeId AddRefType(mirror::Class* c)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_);
template<class T>
JDWP::ObjectId Add(Handle<T> obj_h)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_);
JDWP::RefTypeId AddRefType(Handle<mirror::Class> c_h)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_);
template<typename T> T Get(JDWP::ObjectId id, JDWP::JdwpError* error)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_) {
if (id == 0) {
*error = JDWP::ERR_NONE;
return nullptr;
@@ -88,42 +88,42 @@
return down_cast<T>(InternalGet(id, error));
}
- void Clear() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+ void Clear() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
void DisableCollection(JDWP::ObjectId id)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
void EnableCollection(JDWP::ObjectId id)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
bool IsCollected(JDWP::ObjectId id)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
void DisposeObject(JDWP::ObjectId id, uint32_t reference_count)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
// This is needed to get the jobject instead of the Object*.
// Avoid using this and use standard Get when possible.
- jobject GetJObject(JDWP::ObjectId id) SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+ jobject GetJObject(JDWP::ObjectId id) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
private:
template<class T>
JDWP::ObjectId InternalAdd(Handle<T> obj_h)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
mirror::Object* InternalGet(JDWP::ObjectId id, JDWP::JdwpError* error)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
void Demote(ObjectRegistryEntry& entry)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(lock_);
void Promote(ObjectRegistryEntry& entry)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(lock_);
bool ContainsLocked(Thread* self, mirror::Object* o, int32_t identity_hash_code,
ObjectRegistryEntry** out_entry)
- REQUIRES(lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(lock_) REQUIRES_SHARED(Locks::mutator_lock_);
Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::multimap<int32_t, ObjectRegistryEntry*> object_to_entry_ GUARDED_BY(lock_);
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 2aa6f3d..417a185 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -31,6 +31,7 @@
class ArtMethod;
struct RuntimeArgumentMap;
+union JValue;
namespace jit {
@@ -50,7 +51,7 @@
virtual ~Jit();
static Jit* Create(JitOptions* options, std::string* error_msg);
bool CompileMethod(ArtMethod* method, Thread* self, bool osr)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CreateThreadPool();
const JitCodeCache* GetCodeCache() const {
@@ -70,7 +71,7 @@
void AddMemoryUsage(ArtMethod* method, size_t bytes)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
size_t OSRMethodThreshold() const {
return osr_method_threshold_;
@@ -102,25 +103,25 @@
// Profiling methods.
void MethodEntered(Thread* thread, ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void AddSamples(Thread* self, ArtMethod* method, uint16_t samples, bool with_backedges)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void InvokeVirtualOrInterface(Thread* thread,
mirror::Object* this_object,
ArtMethod* caller,
uint32_t dex_pc,
ArtMethod* callee)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void NotifyInterpreterToCompiledCodeTransition(Thread* self, ArtMethod* caller)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
AddSamples(self, caller, invoke_transition_weight_, false);
}
void NotifyCompiledCodeToInterpreterTransition(Thread* self, ArtMethod* callee)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
AddSamples(self, callee, invoke_transition_weight_, false);
}
@@ -140,7 +141,7 @@
void DumpForSigQuit(std::ostream& os) REQUIRES(!lock_);
static void NewTypeLoadedIfUsingJit(mirror::Class* type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// If debug info generation is turned on then write the type information for types already loaded
// into the specified class linker to the jit debug interface,
@@ -164,7 +165,7 @@
uint32_t dex_pc,
int32_t dex_pc_offset,
JValue* result)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool LoadCompilerLibrary(std::string* error_msg);
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index b1079dd..c9227b1 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -446,7 +446,7 @@
code_cache_(code_cache_in),
bitmap_(code_cache_->GetLiveBitmap()) {}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
if (method_header == nullptr) {
return true;
@@ -469,7 +469,7 @@
MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
: code_cache_(code_cache), barrier_(barrier) {}
- void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Run(Thread* thread) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedTrace trace(__PRETTY_FUNCTION__);
DCHECK(thread == Thread::Current() || thread->IsSuspended());
MarkCodeVisitor visitor(thread, code_cache_);
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 1938221..e15c93a 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -70,7 +70,7 @@
size_t DataCacheSize() REQUIRES(!lock_);
bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!lock_);
// Notify to the code cache that the compiler wants to use the
@@ -78,15 +78,15 @@
// and therefore ensure the returned profiling info object is not
// collected.
ProfilingInfo* NotifyCompilerUse(ArtMethod* method, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!lock_);
void DoneCompiling(ArtMethod* method, Thread* self, bool osr)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!lock_);
void DoneCompilerUse(ArtMethod* method, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!lock_);
// Allocate and write code and its metadata to the code cache.
@@ -99,7 +99,7 @@
const uint8_t* code,
size_t code_size,
bool osr)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!lock_);
// Return true if the code cache contains this pc.
@@ -110,12 +110,12 @@
// Reserve a region of data of size at least "size". Returns null if there is no more room.
uint8_t* ReserveData(Thread* self, size_t size, ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!lock_);
// Clear data from the data portion of the code cache.
void ClearData(Thread* self, void* data)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!lock_);
CodeCacheBitmap* GetLiveBitmap() const {
@@ -125,28 +125,28 @@
// Return whether we should do a full collection given the current state of the cache.
bool ShouldDoFullCollection()
REQUIRES(lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Perform a collection on the code cache.
void GarbageCollectCache(Thread* self)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Given the 'pc', try to find the JIT compiled code associated with it.
// Return null if 'pc' is not in the code cache. 'method' is passed for
// sanity check.
OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
OatQuickMethodHeader* LookupOsrMethodHeader(ArtMethod* method)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Remove all methods in our cache that were allocated by 'alloc'.
void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ClearGcRootsInInlineCaches(Thread* self) REQUIRES(!lock_);
@@ -157,7 +157,7 @@
const std::vector<uint32_t>& entries,
bool retry_allocation)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
return mspace == code_mspace_ || mspace == data_mspace_;
@@ -169,7 +169,7 @@
void GetProfiledMethods(const std::set<std::string>& dex_base_locations,
std::vector<MethodReference>& methods)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
uint64_t GetLastUpdateTimeNs() const;
@@ -182,7 +182,7 @@
void InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* code)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void Dump(std::ostream& os) REQUIRES(!lock_);
@@ -209,13 +209,13 @@
size_t code_size,
bool osr)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ProfilingInfo* AddProfilingInfoInternal(Thread* self,
ArtMethod* method,
const std::vector<uint32_t>& entries)
REQUIRES(lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// If a collection is in progress, wait for it to finish. Return
// whether the thread actually waited.
@@ -243,15 +243,15 @@
void DoCollection(Thread* self, bool collect_profiling_info)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void RemoveUnmarkedCode(Thread* self)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void MarkCompiledCodeOnThreadStacks(Thread* self)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool CheckLiveCompiledCodeHasProfilingInfo()
REQUIRES(lock_);
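A note on the annotation pattern that repeats throughout this header: REQUIRES_SHARED(Locks::mutator_lock_) means the caller must already hold the mutator lock (shared ownership is enough), while REQUIRES(!lock_) means the caller must not hold the cache's own lock_, because the method acquires it internally. A minimal sketch of how such macros typically map onto Clang's -Wthread-safety attributes (assumed, simplified definitions; the real ones live in ART's macro headers):

#define CAPABILITY(x)        __attribute__((capability(x)))
#define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
#define REQUIRES_SHARED(...) __attribute__((requires_shared_capability(__VA_ARGS__)))

struct CAPABILITY("mutex") FakeMutex {};

FakeMutex mutator_lock_;  // a reader-writer lock in real ART
FakeMutex lock_;          // the code cache's own lock

// Same annotation pattern as GarbageCollectCache() above: the caller must hold the
// mutator lock (shared is enough) and must NOT hold lock_, which is taken internally.
void GarbageCollectCacheLike() REQUIRES_SHARED(mutator_lock_) REQUIRES(!lock_);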
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 927681c..42916c3 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -189,7 +189,7 @@
: methods_(methods),
startup_method_samples_(startup_method_samples) {}
- virtual bool operator()(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) {
+ virtual bool operator()(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
return true;
}
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index a890fbb..1056fac 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -53,7 +53,7 @@
return true;
}
- mirror::Class* GetMonomorphicType() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Class* GetMonomorphicType() const REQUIRES_SHARED(Locks::mutator_lock_) {
// Note that we cannot ensure the inline cache is actually monomorphic
// at this point, as other threads may have updated it.
DCHECK(!classes_[0].IsNull());
@@ -69,7 +69,7 @@
return !classes_[1].IsNull() && classes_[kIndividualCacheSize - 1].IsNull();
}
- mirror::Class* GetTypeAt(size_t i) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Class* GetTypeAt(size_t i) const REQUIRES_SHARED(Locks::mutator_lock_) {
return classes_[i].Read();
}
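For context on the accessors above: the inline cache is a small fixed-size array of classes per call site, and its state can be classified from which slots are filled. A sketch of that classification (hypothetical plain-pointer cache and assumed cache size; real ART stores GcRoot<Class> entries):

#include <cstddef>

constexpr size_t kIndividualCacheSize = 5;  // assumed value, for illustration only

enum class InlineCacheState { kUninitialized, kMonomorphic, kPolymorphic, kMegamorphic };

InlineCacheState Classify(void* const classes[kIndividualCacheSize]) {
  if (classes[0] == nullptr) {
    return InlineCacheState::kUninitialized;  // no receiver type seen yet
  }
  if (classes[1] == nullptr) {
    return InlineCacheState::kMonomorphic;    // exactly one receiver type so far
  }
  if (classes[kIndividualCacheSize - 1] != nullptr) {
    return InlineCacheState::kMegamorphic;    // cache is full
  }
  return InlineCacheState::kPolymorphic;      // several types, cache not yet full
}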
@@ -93,14 +93,14 @@
// Create a ProfilingInfo for 'method'. Return whether it succeeded, or if it is
// not needed in case the method does not have virtual/interface invocations.
static bool Create(Thread* self, ArtMethod* method, bool retry_allocation)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Add information from an executed INVOKE instruction to the profile.
void AddInvokeInfo(uint32_t dex_pc, mirror::Class* cls)
// Method should not be interruptible, as it manipulates the ProfilingInfo
// which can be concurrently collected.
REQUIRES(Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
template<typename RootVisitorType>
diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc
index 40efc89..0358494 100644
--- a/runtime/jni_env_ext.cc
+++ b/runtime/jni_env_ext.cc
@@ -154,7 +154,7 @@
}
// Use some defining part of the caller's frame as the identifying mark for the JNI segment.
-static uintptr_t GetJavaCallFrame(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
+static uintptr_t GetJavaCallFrame(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
NthCallerVisitor zeroth_caller(self, 0, false);
zeroth_caller.WalkStack();
if (zeroth_caller.caller == nullptr) {
@@ -175,7 +175,7 @@
}
static std::string ComputeMonitorDescription(Thread* self,
- jobject obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+ jobject obj) REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* o = self->DecodeJObject(obj);
if ((o->GetLockWord(false).GetState() == LockWord::kThinLocked) &&
Locks::mutator_lock_->IsExclusiveHeld(self)) {
@@ -196,12 +196,12 @@
uintptr_t frame,
ReferenceTable* monitors,
std::vector<std::pair<uintptr_t, jobject>>* locked_objects)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
auto kept_end = std::remove_if(
locked_objects->begin(),
locked_objects->end(),
[self, frame, monitors](const std::pair<uintptr_t, jobject>& pair)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (frame == pair.first) {
mirror::Object* o = self->DecodeJObject(pair.second);
monitors->Remove(o);
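For context on the lambda above: CheckJNI remembers each monitor locked through JNI together with an identifying mark for the Java call frame (the JNI "segment") that locked it, and drops those entries when that segment ends. A standalone sketch of that bookkeeping (plain types instead of ReferenceTable/jobject):

#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

// (frame mark, monitor) pairs recorded at MonitorEnter time, removed once the
// frame's JNI segment is popped.
using LockedList = std::vector<std::pair<uintptr_t, void*>>;

void RecordMonitorEnterLike(LockedList* locked, uintptr_t frame, void* monitor) {
  locked->push_back({frame, monitor});
}

void RemoveMonitorsForFrame(LockedList* locked, uintptr_t frame) {
  auto kept_end = std::remove_if(
      locked->begin(), locked->end(),
      [frame](const std::pair<uintptr_t, void*>& pair) { return pair.first == frame; });
  locked->erase(kept_end, locked->end());
}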
diff --git a/runtime/jni_env_ext.h b/runtime/jni_env_ext.h
index ac287d4..79dfb0d 100644
--- a/runtime/jni_env_ext.h
+++ b/runtime/jni_env_ext.h
@@ -39,16 +39,16 @@
~JNIEnvExt();
void DumpReferenceTables(std::ostream& os)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetCheckJniEnabled(bool enabled);
- void PushFrame(int capacity) SHARED_REQUIRES(Locks::mutator_lock_);
- void PopFrame() SHARED_REQUIRES(Locks::mutator_lock_);
+ void PushFrame(int capacity) REQUIRES_SHARED(Locks::mutator_lock_);
+ void PopFrame() REQUIRES_SHARED(Locks::mutator_lock_);
template<typename T>
T AddLocalReference(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static Offset SegmentStateOffset(size_t pointer_size);
static Offset LocalRefCookieOffset(size_t pointer_size);
@@ -56,8 +56,8 @@
static jint GetEnvHandler(JavaVMExt* vm, /*out*/void** out, jint version);
- jobject NewLocalRef(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
- void DeleteLocalRef(jobject obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ jobject NewLocalRef(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
+ void DeleteLocalRef(jobject obj) REQUIRES_SHARED(Locks::mutator_lock_);
Thread* const self;
JavaVMExt* const vm;
@@ -92,13 +92,13 @@
// rules in CheckJNI mode.
// Record locking of a monitor.
- void RecordMonitorEnter(jobject obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ void RecordMonitorEnter(jobject obj) REQUIRES_SHARED(Locks::mutator_lock_);
// Check the release, that is, that the release is performed in the same JNI "segment."
- void CheckMonitorRelease(jobject obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ void CheckMonitorRelease(jobject obj) REQUIRES_SHARED(Locks::mutator_lock_);
// Check that no monitors are held that have been acquired in this JNI "segment."
- void CheckNoHeldMonitors() SHARED_REQUIRES(Locks::mutator_lock_);
+ void CheckNoHeldMonitors() REQUIRES_SHARED(Locks::mutator_lock_);
// Set the functions to the runtime shutdown functions.
void SetFunctionsToRuntimeShutdownFunctions();
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 7bcadd8..a434442 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -90,7 +90,7 @@
static void ThrowNoSuchMethodError(ScopedObjectAccess& soa, mirror::Class* c,
const char* name, const char* sig, const char* kind)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::string temp;
soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchMethodError;",
"no %s method \"%s.%s%s\"",
@@ -99,7 +99,7 @@
static void ReportInvalidJNINativeMethod(const ScopedObjectAccess& soa, mirror::Class* c,
const char* kind, jint idx, bool return_errors)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(return_errors ? ERROR : FATAL) << "Failed to register native method in "
<< PrettyDescriptor(c) << " in " << c->GetDexCache()->GetLocation()->ToModifiedUtf8()
<< ": " << kind << " is null at index " << idx;
@@ -108,7 +108,7 @@
}
static mirror::Class* EnsureInitialized(Thread* self, mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (LIKELY(klass->IsInitialized())) {
return klass;
}
@@ -122,7 +122,7 @@
static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class,
const char* name, const char* sig, bool is_static)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Class* c = EnsureInitialized(soa.Self(), soa.Decode<mirror::Class*>(jni_class));
if (c == nullptr) {
return nullptr;
@@ -149,7 +149,7 @@
}
static mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = soa.Self()->GetCurrentMethod(nullptr);
// If we are running Runtime.nativeLoad, use the overriding ClassLoader it set.
if (method == soa.DecodeMethod(WellKnownClasses::java_lang_Runtime_nativeLoad)) {
@@ -180,7 +180,7 @@
static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, const char* name,
const char* sig, bool is_static)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<2> hs(soa.Self());
Handle<mirror::Class> c(
hs.NewHandle(EnsureInitialized(soa.Self(), soa.Decode<mirror::Class*>(jni_class))));
@@ -228,7 +228,7 @@
static void ThrowAIOOBE(ScopedObjectAccess& soa, mirror::Array* array, jsize start,
jsize length, const char* identifier)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::string type(PrettyTypeOf(array));
soa.Self()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
"%s offset=%d length=%d %s.length=%d",
@@ -237,7 +237,7 @@
static void ThrowSIOOBE(ScopedObjectAccess& soa, jsize start, jsize length,
jsize array_length)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
soa.Self()->ThrowNewExceptionF("Ljava/lang/StringIndexOutOfBoundsException;",
"offset=%d length=%d string.length()=%d", start, length,
array_length);
@@ -315,7 +315,7 @@
template <bool kNative>
static ArtMethod* FindMethod(mirror::Class* c, const StringPiece& name, const StringPiece& sig)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
for (auto& method : c->GetMethods(pointer_size)) {
if (kNative == method.IsNative() && name == method.GetName() && method.GetSignature() == sig) {
@@ -2372,7 +2372,7 @@
private:
static jint EnsureLocalCapacityInternal(ScopedObjectAccess& soa, jint desired_capacity,
const char* caller)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// TODO: we should try to expand the table if necessary.
if (desired_capacity < 0 || desired_capacity > static_cast<jint>(kLocalsMax)) {
LOG(ERROR) << "Invalid capacity given to " << caller << ": " << desired_capacity;
@@ -2401,7 +2401,7 @@
template <typename JArrayT, typename ElementT, typename ArtArrayT>
static ArtArrayT* DecodeAndCheckArrayType(ScopedObjectAccess& soa, JArrayT java_array,
const char* fn_name, const char* operation)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtArrayT* array = soa.Decode<ArtArrayT*>(java_array);
if (UNLIKELY(ArtArrayT::GetArrayClass() != array->GetClass())) {
soa.Vm()->JniAbortF(fn_name,
@@ -2458,7 +2458,7 @@
static void ReleasePrimitiveArray(ScopedObjectAccess& soa, mirror::Array* array,
size_t component_size, void* elements, jint mode)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
void* array_data = array->GetRawData(component_size, 0);
gc::Heap* heap = Runtime::Current()->GetHeap();
bool is_copy = array_data != elements;
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 6495474..fe0081c 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -1105,8 +1105,9 @@
ExpectException(aioobe_); \
\
/* Prepare a couple of buffers. */ \
- std::unique_ptr<scalar_type[]> src_buf(new scalar_type[size]); \
- std::unique_ptr<scalar_type[]> dst_buf(new scalar_type[size]); \
+ /* NOLINT, no parentheses around scalar_type. */ \
+ std::unique_ptr<scalar_type[]> src_buf(new scalar_type[size]); /* NOLINT */ \
+ std::unique_ptr<scalar_type[]> dst_buf(new scalar_type[size]); /* NOLINT */ \
for (jsize i = 0; i < size; ++i) { src_buf[i] = scalar_type(i); } \
for (jsize i = 0; i < size; ++i) { dst_buf[i] = scalar_type(-1); } \
\
@@ -1131,7 +1132,7 @@
<< "GetPrimitiveArrayCritical not equal"; \
env_->ReleasePrimitiveArrayCritical(a, v, 0); \
/* GetXArrayElements */ \
- scalar_type* xs = env_->get_elements_fn(a, nullptr); \
+ scalar_type* xs = env_->get_elements_fn(a, nullptr); /* NOLINT, scalar_type */ \
EXPECT_EQ(memcmp(&src_buf[0], xs, size * sizeof(scalar_type)), 0) \
<< # get_elements_fn " not equal"; \
env_->release_elements_fn(a, xs, 0); \
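A note on the NOLINT markers added above: scalar_type is a macro parameter, and the usual "parenthesize macro parameters" lint cannot be satisfied here because new (scalar_type)[size] would no longer be an array new, so the warning is suppressed instead. A small illustration of the constraint (hypothetical MAKE_BUFFER macro):

// Hypothetical macro with the same shape as the test helper above.
#define MAKE_BUFFER(scalar_type, n) (new scalar_type[n])        // array new of scalar_type
// #define MAKE_BUFFER(scalar_type, n) (new (scalar_type)[n])   // not an array new: the
//                                                              // parenthesized type-id ends
//                                                              // the new-expression

int main() {
  int* buf = MAKE_BUFFER(int, 16);  // allocates int[16]
  delete[] buf;
  return 0;
}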
diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h
index 4f714a6..22a3ea8 100644
--- a/runtime/mirror/abstract_method.h
+++ b/runtime/mirror/abstract_method.h
@@ -35,14 +35,14 @@
public:
// Called from Constructor::CreateFromArtMethod, Method::CreateFromArtMethod.
template <PointerSize kPointerSize, bool kTransactionActive>
- bool CreateFromArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_)
+ bool CreateFromArtMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
- ArtMethod* GetArtMethod() SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod* GetArtMethod() REQUIRES_SHARED(Locks::mutator_lock_);
// Only used by the image writer.
template <bool kTransactionActive = false>
- void SetArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetArtMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+ mirror::Class* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
private:
static MemberOffset ArtMethodOffset() {
diff --git a/runtime/mirror/accessible_object.h b/runtime/mirror/accessible_object.h
index dcf5118..1d934a8 100644
--- a/runtime/mirror/accessible_object.h
+++ b/runtime/mirror/accessible_object.h
@@ -36,12 +36,12 @@
}
template<bool kTransactionActive>
- void SetAccessible(bool value) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetAccessible(bool value) REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(padding_);
return SetFieldBoolean<kTransactionActive>(FlagOffset(), value ? 1u : 0u);
}
- bool IsAccessible() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsAccessible() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldBoolean(FlagOffset());
}
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 014e54b..9d7f98f 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -101,7 +101,7 @@
}
void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Avoid AsArray as object is not yet in live bitmap or allocation stack.
Array* array = down_cast<Array*>(obj);
// DCHECK(array->IsArrayInstance());
@@ -125,7 +125,7 @@
}
void operator()(Object* obj, size_t usable_size) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Avoid AsArray as object is not yet in live bitmap or allocation stack.
Array* array = down_cast<Array*>(obj);
// DCHECK(array->IsArrayInstance());
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index 4128689..aee48cc 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -43,7 +43,7 @@
static Array* RecursiveCreateMultiArray(Thread* self,
Handle<Class> array_class, int current_dimension,
Handle<mirror::IntArray> dimensions)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
int32_t array_length = dimensions->Get(current_dimension);
StackHandleScope<1> hs(self);
Handle<Array> new_array(
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index ec10a43..6c82eb9 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -40,21 +40,21 @@
template <bool kIsInstrumented, bool kFillUsable = false>
ALWAYS_INLINE static Array* Alloc(Thread* self, Class* array_class, int32_t component_count,
size_t component_size_shift, gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static Array* CreateMultiArray(Thread* self, Handle<Class> element_class,
Handle<IntArray> dimensions)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- size_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_);
+ size_t SizeOf() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE int32_t GetLength() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE int32_t GetLength() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Array, length_));
}
- void SetLength(int32_t length) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetLength(int32_t length) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_GE(length, 0);
// We use the non-transactional version since we can't undo this write. We also disable checking
// since it would fail during a transaction.
@@ -68,7 +68,7 @@
static MemberOffset DataOffset(size_t component_size);
void* GetRawData(size_t component_size, int32_t index)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
intptr_t data = reinterpret_cast<intptr_t>(this) + DataOffset(component_size).Int32Value() +
+ (index * component_size);
return reinterpret_cast<void*>(data);
@@ -83,18 +83,18 @@
// Returns true if the index is valid. If not, throws an ArrayIndexOutOfBoundsException and
// returns false.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE bool CheckIsValidIndex(int32_t index) SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE bool CheckIsValidIndex(int32_t index) REQUIRES_SHARED(Locks::mutator_lock_);
- Array* CopyOf(Thread* self, int32_t new_length) SHARED_REQUIRES(Locks::mutator_lock_)
+ Array* CopyOf(Thread* self, int32_t new_length) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
protected:
- void ThrowArrayStoreException(Object* object) SHARED_REQUIRES(Locks::mutator_lock_)
+ void ThrowArrayStoreException(Object* object) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
private:
void ThrowArrayIndexOutOfBoundsException(int32_t index)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// The number of array elements.
int32_t length_;
@@ -110,32 +110,32 @@
typedef T ElementType;
static PrimitiveArray<T>* Alloc(Thread* self, size_t length)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
- const T* GetData() const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ const T* GetData() const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
return reinterpret_cast<const T*>(GetRawData(sizeof(T), 0));
}
- T* GetData() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ T* GetData() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
return reinterpret_cast<T*>(GetRawData(sizeof(T), 0));
}
- T Get(int32_t i) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
+ T Get(int32_t i) ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
- T GetWithoutChecks(int32_t i) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ T GetWithoutChecks(int32_t i) ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(CheckIsValidIndex(i)) << "i=" << i << " length=" << GetLength();
return GetData()[i];
}
- void Set(int32_t i, T value) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
+ void Set(int32_t i, T value) ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
// TODO fix thread safety analysis broken by the use of template. This should be
- // SHARED_REQUIRES(Locks::mutator_lock_).
+ // REQUIRES_SHARED(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true>
void Set(int32_t i, T value) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS;
// TODO fix thread safety analysis broken by the use of template. This should be
- // SHARED_REQUIRES(Locks::mutator_lock_).
+ // REQUIRES_SHARED(Locks::mutator_lock_).
template<bool kTransactionActive,
bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -147,7 +147,7 @@
* and the arrays non-null.
*/
void Memmove(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos, int32_t count)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Works like memcpy(), except we guarantee not to allow tearing of array values (ie using
@@ -155,7 +155,7 @@
* and the arrays non-null.
*/
void Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos, int32_t count)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void SetArrayClass(Class* array_class) {
CHECK(array_class_.IsNull());
@@ -164,7 +164,7 @@
}
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- static Class* GetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static Class* GetArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!array_class_.IsNull());
return array_class_.Read<kReadBarrierOption>();
}
@@ -174,7 +174,7 @@
array_class_ = GcRoot<Class>(nullptr);
}
- static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
private:
static GcRoot<Class> array_class_;
@@ -189,14 +189,14 @@
VerifyObjectFlags kVerifyFlags = kVerifyNone,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
T GetElementPtrSize(uint32_t idx, PointerSize ptr_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive = false, bool kUnchecked = false>
void SetElementPtrSize(uint32_t idx, uint64_t element, PointerSize ptr_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive = false, bool kUnchecked = false, typename T>
void SetElementPtrSize(uint32_t idx, T* element, PointerSize ptr_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Fixup the pointers in the dest arrays by passing our pointers through the visitor. Only copies
// to dest if visitor(source_ptr) != source_ptr.
@@ -204,7 +204,7 @@
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
void Fixup(mirror::PointerArray* dest, PointerSize pointer_size, const Visitor& visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
};
} // namespace mirror
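For context on the accessors in array.h above: element access reduces to address arithmetic, element i lives at this + DataOffset(component_size) + i * component_size. A worked sketch of that layout math (plain integers, not ART types):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// header_size stands in for DataOffset(component_size): the fixed part of the array
// object that precedes the element data.
uintptr_t ElementAddress(uintptr_t array_base, size_t header_size,
                         size_t component_size, int32_t index) {
  return array_base + header_size + static_cast<size_t>(index) * component_size;
}

int main() {
  // Hypothetical int array at 0x1000 with a 16-byte header:
  // element 3 lives at 0x1000 + 16 + 3 * 4 = 0x101c.
  std::printf("%#zx\n", static_cast<size_t>(ElementAddress(0x1000, 16, sizeof(int32_t), 3)));
  return 0;
}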
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 0f2aac2..d1d8caa 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -42,11 +42,19 @@
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline uint32_t Class::GetObjectSize() {
// Note: Extra parentheses to avoid the comma being interpreted as macro parameter separator.
- DCHECK((!IsVariableSize<kVerifyFlags, kReadBarrierOption>())) << " class=" << PrettyTypeOf(this);
+ DCHECK((!IsVariableSize<kVerifyFlags, kReadBarrierOption>())) << "class=" << PrettyTypeOf(this);
return GetField32(ObjectSizeOffset());
}
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+inline uint32_t Class::GetObjectSizeAllocFastPath() {
+ // Note: Extra parentheses to avoid the comma being interpreted as macro parameter separator.
+ DCHECK((!IsVariableSize<kVerifyFlags, kReadBarrierOption>())) << "class=" << PrettyTypeOf(this);
+ return GetField32(ObjectSizeAllocFastPathOffset());
+}
+
+template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline Class* Class::GetSuperClass() {
// Can only get super class for loaded classes (hack for when runtime is
// initializing)
@@ -861,6 +869,8 @@
klass->SetPrimitiveType(Primitive::kPrimNot); // Default to not being primitive.
klass->SetDexClassDefIndex(DexFile::kDexNoIndex16); // Default to no valid class def index.
klass->SetDexTypeIndex(DexFile::kDexNoIndex16); // Default to no valid type index.
+ // Default to force slow path until initialized.
+ klass->SetObjectSizeAllocFastPath(std::numeric_limits<uint32_t>::max());
}
inline void Class::SetAccessFlags(uint32_t new_access_flags) {
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index f948be7..c979c28 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -100,9 +100,20 @@
}
static_assert(sizeof(Status) == sizeof(uint32_t), "Size of status not equal to uint32");
if (Runtime::Current()->IsActiveTransaction()) {
- h_this->SetField32Volatile<true>(OFFSET_OF_OBJECT_MEMBER(Class, status_), new_status);
+ h_this->SetField32Volatile<true>(StatusOffset(), new_status);
} else {
- h_this->SetField32Volatile<false>(OFFSET_OF_OBJECT_MEMBER(Class, status_), new_status);
+ h_this->SetField32Volatile<false>(StatusOffset(), new_status);
+ }
+
+ // Setting the object size alloc fast path needs to be after the status write, so that if the
+ // alloc path sees a valid object size, the class is known to be initialized, provided the read
+ // has a load-acquire or fake (address) dependency.
+ if (new_status == kStatusInitialized && !h_this->IsVariableSize()) {
+ DCHECK_EQ(h_this->GetObjectSizeAllocFastPath(), std::numeric_limits<uint32_t>::max());
+ // Finalizable objects must always go slow path.
+ if (!h_this->IsFinalizable()) {
+ h_this->SetObjectSizeAllocFastPath(RoundUp(h_this->GetObjectSize(), kObjectAlignment));
+ }
}
if (!class_linker_initialized) {
@@ -137,7 +148,7 @@
if (kIsDebugBuild && new_class_size < GetClassSize()) {
DumpClass(LOG(INTERNAL_FATAL), kDumpClassFullDetail);
LOG(INTERNAL_FATAL) << new_class_size << " vs " << GetClassSize();
- LOG(FATAL) << " class=" << PrettyTypeOf(this);
+ LOG(FATAL) << "class=" << PrettyTypeOf(this);
}
// Not called within a transaction.
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), new_class_size);
@@ -641,7 +652,7 @@
static ArtField* FindFieldByNameAndType(LengthPrefixedArray<ArtField>* fields,
const StringPiece& name,
const StringPiece& type)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (fields == nullptr) {
return nullptr;
}
@@ -952,14 +963,14 @@
bool is_static ATTRIBUTE_UNUSED) const {}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* old_ref = root->AsMirrorPtr();
mirror::Object* new_ref = ReadBarrier::BarrierForRoot(root);
if (old_ref != new_ref) {
@@ -987,7 +998,7 @@
}
void operator()(mirror::Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self_);
Handle<mirror::Class> h_new_class_obj(hs.NewHandle(obj->AsClass()));
mirror::Object::CopyObject(self_, h_new_class_obj.Get(), orig_->Get(), copy_bytes_);
@@ -1209,5 +1220,13 @@
return flags;
}
+void Class::SetObjectSizeAllocFastPath(uint32_t new_object_size) {
+ if (Runtime::Current()->IsActiveTransaction()) {
+ SetField32Volatile<true>(ObjectSizeAllocFastPathOffset(), new_object_size);
+ } else {
+ SetField32Volatile<false>(ObjectSizeAllocFastPathOffset(), new_object_size);
+ }
+}
+
} // namespace mirror
} // namespace art
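For context on the new object_size_alloc_fast_path_ handling above: the field starts at the uint32 sentinel (set when the class object is initialized, see the class-inl.h hunk), and only becomes the real size, rounded up to kObjectAlignment, once the class reaches kStatusInitialized and is neither variable-size nor finalizable. A rough sketch of the fast-path check this enables (hypothetical helpers, not ART's allocator):

#include <cstdint>
#include <limits>

constexpr uint32_t kForceSlowPath = std::numeric_limits<uint32_t>::max();

void* AllocObjectLike(uint32_t object_size_alloc_fast_path,
                      void* (*bump_alloc)(uint32_t),  // e.g. a TLAB bump-pointer allocation
                      void* (*slow_path)()) {
  // The real code reads the field with a load-acquire/fake dependency, per the comment
  // in SetStatus() above.
  uint32_t size = object_size_alloc_fast_path;
  if (size == kForceSlowPath) {
    return slow_path();  // uninitialized, finalizable or variable-size class
  }
  return bump_alloc(size);  // size is already rounded up to kObjectAlignment
}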
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 21af15e..99b7769 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -133,7 +133,7 @@
};
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- Status GetStatus() SHARED_REQUIRES(Locks::mutator_lock_) {
+ Status GetStatus() REQUIRES_SHARED(Locks::mutator_lock_) {
static_assert(sizeof(Status) == sizeof(uint32_t), "Size of status not equal to uint32");
return static_cast<Status>(
GetField32Volatile<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, status_)));
@@ -141,7 +141,7 @@
// This is static because 'this' may be moved by GC.
static void SetStatus(Handle<Class> h_this, Status new_status, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static MemberOffset StatusOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, status_);
@@ -149,157 +149,157 @@
// Returns true if the class has been retired.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsRetired() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsRetired() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() == kStatusRetired;
}
// Returns true if the class has failed to link.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsErroneous() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsErroneous() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() == kStatusError;
}
// Returns true if the class has been loaded.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsIdxLoaded() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsIdxLoaded() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() >= kStatusIdx;
}
// Returns true if the class has been loaded.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsLoaded() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsLoaded() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() >= kStatusLoaded;
}
// Returns true if the class has been linked.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsResolved() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsResolved() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() >= kStatusResolved;
}
// Returns true if the class was compile-time verified.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsCompileTimeVerified() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsCompileTimeVerified() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() >= kStatusRetryVerificationAtRuntime;
}
// Returns true if the class has been verified.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsVerified() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsVerified() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() >= kStatusVerified;
}
// Returns true if the class is initializing.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsInitializing() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsInitializing() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() >= kStatusInitializing;
}
// Returns true if the class is initialized.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsInitialized() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsInitialized() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() == kStatusInitialized;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetAccessFlags() REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset AccessFlagsOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, access_flags_);
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE uint32_t GetClassFlags() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE uint32_t GetClassFlags() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_flags_));
}
- void SetClassFlags(uint32_t new_flags) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetClassFlags(uint32_t new_flags) REQUIRES_SHARED(Locks::mutator_lock_);
- void SetAccessFlags(uint32_t new_access_flags) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetAccessFlags(uint32_t new_access_flags) REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if the class is an interface.
- ALWAYS_INLINE bool IsInterface() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsInterface() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccInterface) != 0;
}
// Returns true if the class is declared public.
- ALWAYS_INLINE bool IsPublic() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsPublic() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccPublic) != 0;
}
// Returns true if the class is declared final.
- ALWAYS_INLINE bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsFinal() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccFinal) != 0;
}
- ALWAYS_INLINE bool IsFinalizable() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsFinalizable() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccClassIsFinalizable) != 0;
}
- ALWAYS_INLINE void SetRecursivelyInitialized() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE void SetRecursivelyInitialized() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(GetLockOwnerThreadId(), Thread::Current()->GetThreadId());
uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
SetAccessFlags(flags | kAccRecursivelyInitialized);
}
- ALWAYS_INLINE void SetHasDefaultMethods() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE void SetHasDefaultMethods() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(GetLockOwnerThreadId(), Thread::Current()->GetThreadId());
uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
SetAccessFlags(flags | kAccHasDefaultMethod);
}
- ALWAYS_INLINE void SetFinalizable() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE void SetFinalizable() REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
SetAccessFlags(flags | kAccClassIsFinalizable);
}
- ALWAYS_INLINE bool IsStringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsStringClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetClassFlags() & kClassFlagString) != 0;
}
- ALWAYS_INLINE void SetStringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE void SetStringClass() REQUIRES_SHARED(Locks::mutator_lock_) {
SetClassFlags(kClassFlagString | kClassFlagNoReferenceFields);
}
- ALWAYS_INLINE bool IsClassLoaderClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsClassLoaderClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetClassFlags() == kClassFlagClassLoader;
}
- ALWAYS_INLINE void SetClassLoaderClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE void SetClassLoaderClass() REQUIRES_SHARED(Locks::mutator_lock_) {
SetClassFlags(kClassFlagClassLoader);
}
- ALWAYS_INLINE bool IsDexCacheClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsDexCacheClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetClassFlags() & kClassFlagDexCache) != 0;
}
- ALWAYS_INLINE void SetDexCacheClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE void SetDexCacheClass() REQUIRES_SHARED(Locks::mutator_lock_) {
SetClassFlags(GetClassFlags() | kClassFlagDexCache);
}
// Returns true if the class is abstract.
- ALWAYS_INLINE bool IsAbstract() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsAbstract() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccAbstract) != 0;
}
// Returns true if the class is an annotation.
- ALWAYS_INLINE bool IsAnnotation() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsAnnotation() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccAnnotation) != 0;
}
// Returns true if the class is synthetic.
- ALWAYS_INLINE bool IsSynthetic() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsSynthetic() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccSynthetic) != 0;
}
// Return whether the verifier has been run on this class at least once.
// This does not necessarily mean that access checks are avoidable,
// since the class methods might still need to be run with access checks.
- bool WasVerificationAttempted() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool WasVerificationAttempted() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccSkipAccessChecks) != 0;
}
// Mark the class as having gone through a verification attempt.
// Mutually exclusive from whether or not each method is allowed to skip access checks.
- void SetVerificationAttempted() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetVerificationAttempted() REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
if ((flags & kAccVerificationAttempted) == 0) {
SetAccessFlags(flags | kAccVerificationAttempted);
@@ -307,27 +307,27 @@
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsTypeOfReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsTypeOfReferenceClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetClassFlags<kVerifyFlags>() & kClassFlagReference) != 0;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsWeakReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsWeakReferenceClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetClassFlags<kVerifyFlags>() == kClassFlagWeakReference;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsSoftReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsSoftReferenceClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetClassFlags<kVerifyFlags>() == kClassFlagSoftReference;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsFinalizerReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsFinalizerReferenceClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetClassFlags<kVerifyFlags>() == kClassFlagFinalizerReference;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPhantomReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPhantomReferenceClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetClassFlags<kVerifyFlags>() == kClassFlagPhantomReference;
}
@@ -336,7 +336,7 @@
// For array classes, where all the classes are final due to there being no sub-classes, an
// Object[] may be assigned to by a String[] but a String[] may not be assigned to by other
// types as the component is final.
- bool CannotBeAssignedFromOtherTypes() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool CannotBeAssignedFromOtherTypes() REQUIRES_SHARED(Locks::mutator_lock_) {
if (!IsArrayClass()) {
return IsFinal();
} else {
@@ -351,19 +351,19 @@
// Returns true if this class is the placeholder and should retire and
// be replaced with a class with the right size for embedded imt/vtable.
- bool IsTemp() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsTemp() REQUIRES_SHARED(Locks::mutator_lock_) {
Status s = GetStatus();
return s < Status::kStatusResolving && ShouldHaveEmbeddedVTable();
}
- String* GetName() SHARED_REQUIRES(Locks::mutator_lock_); // Returns the cached name.
- void SetName(String* name) SHARED_REQUIRES(Locks::mutator_lock_); // Sets the cached name.
+ String* GetName() REQUIRES_SHARED(Locks::mutator_lock_); // Returns the cached name.
+ void SetName(String* name) REQUIRES_SHARED(Locks::mutator_lock_); // Sets the cached name.
// Computes the name, then sets the cached value.
- static String* ComputeName(Handle<Class> h_this) SHARED_REQUIRES(Locks::mutator_lock_)
+ static String* ComputeName(Handle<Class> h_this) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsProxyClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsProxyClass() REQUIRES_SHARED(Locks::mutator_lock_) {
// Read access flags without using getter as whether something is a proxy can be checked in
// any loaded state
// TODO: switch to a check if the super class is java.lang.reflect.Proxy?
@@ -376,9 +376,9 @@
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- Primitive::Type GetPrimitiveType() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
+ Primitive::Type GetPrimitiveType() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
- void SetPrimitiveType(Primitive::Type new_type) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetPrimitiveType(Primitive::Type new_type) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(sizeof(Primitive::Type), sizeof(int32_t));
uint32_t v32 = static_cast<uint32_t>(new_type);
DCHECK_EQ(v32 & kPrimitiveTypeMask, v32) << "upper 16 bits aren't zero";
@@ -388,81 +388,81 @@
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- size_t GetPrimitiveTypeSizeShift() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
+ size_t GetPrimitiveTypeSizeShift() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if the class is a primitive type.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitive() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrimitive() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() != Primitive::kPrimNot;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveBoolean() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrimitiveBoolean() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimBoolean;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveByte() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrimitiveByte() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimByte;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveChar() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrimitiveChar() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimChar;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveShort() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrimitiveShort() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimShort;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveInt() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrimitiveInt() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetPrimitiveType() == Primitive::kPrimInt;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveLong() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrimitiveLong() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimLong;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveFloat() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrimitiveFloat() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimFloat;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveDouble() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrimitiveDouble() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimDouble;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveVoid() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrimitiveVoid() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimVoid;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveArray() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrimitiveArray() REQUIRES_SHARED(Locks::mutator_lock_) {
return IsArrayClass<kVerifyFlags>() &&
GetComponentType<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>()->
IsPrimitive();
}
// Depth of class from java.lang.Object
- uint32_t Depth() SHARED_REQUIRES(Locks::mutator_lock_);
+ uint32_t Depth() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsArrayClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsArrayClass() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsClassClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsClassClass() REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsThrowableClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsThrowableClass() REQUIRES_SHARED(Locks::mutator_lock_);
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsReferenceClass() const SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsReferenceClass() const REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset ComponentTypeOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, component_type_);
@@ -470,9 +470,9 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- Class* GetComponentType() SHARED_REQUIRES(Locks::mutator_lock_);
+ Class* GetComponentType() REQUIRES_SHARED(Locks::mutator_lock_);
- void SetComponentType(Class* new_component_type) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetComponentType(Class* new_component_type) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(GetComponentType() == nullptr);
DCHECK(new_component_type != nullptr);
// Component type is invariant: use non-transactional mode without check.
@@ -480,46 +480,46 @@
}
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- size_t GetComponentSize() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t GetComponentSize() REQUIRES_SHARED(Locks::mutator_lock_) {
return 1U << GetComponentSizeShift<kReadBarrierOption>();
}
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- size_t GetComponentSizeShift() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t GetComponentSizeShift() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetComponentType<kDefaultVerifyFlags, kReadBarrierOption>()->GetPrimitiveTypeSizeShift();
}
- bool IsObjectClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsObjectClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return !IsPrimitive() && GetSuperClass() == nullptr;
}
- bool IsInstantiableNonArray() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsInstantiableNonArray() REQUIRES_SHARED(Locks::mutator_lock_) {
return !IsPrimitive() && !IsInterface() && !IsAbstract() && !IsArrayClass();
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsInstantiable() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsInstantiable() REQUIRES_SHARED(Locks::mutator_lock_) {
return (!IsPrimitive() && !IsInterface() && !IsAbstract()) ||
(IsAbstract() && IsArrayClass<kVerifyFlags, kReadBarrierOption>());
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsObjectArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsObjectArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Class* const component_type = GetComponentType<kVerifyFlags, kReadBarrierOption>();
return component_type != nullptr && !component_type->IsPrimitive();
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsIntArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsIntArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
auto* component_type = GetComponentType<kVerifyFlags>();
return component_type != nullptr && component_type->template IsPrimitiveInt<kNewFlags>();
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsLongArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsLongArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
auto* component_type = GetComponentType<kVerifyFlags>();
return component_type != nullptr && component_type->template IsPrimitiveLong<kNewFlags>();
@@ -528,16 +528,16 @@
// Creates a raw object instance but does not invoke the default constructor.
template<bool kIsInstrumented, bool kCheckAddFinalizer = true>
ALWAYS_INLINE Object* Alloc(Thread* self, gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
Object* AllocObject(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
Object* AllocNonMovableObject(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsVariableSize() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsVariableSize() REQUIRES_SHARED(Locks::mutator_lock_) {
// Classes, arrays, and strings vary in size, and so the object_size_ field cannot
// be used to Get their instance size
return IsClassClass<kVerifyFlags, kReadBarrierOption>() ||
@@ -546,17 +546,17 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- uint32_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t SizeOf() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_));
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- uint32_t GetClassSize() SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t GetClassSize() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_));
}
void SetClassSize(uint32_t new_class_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Compute how many bytes would be used by a class with the given elements.
static uint32_t ComputeClassSize(bool has_embedded_vtable,
@@ -571,7 +571,7 @@
// The size of java.lang.Class.class.
static uint32_t ClassClassSize(PointerSize pointer_size) {
// The number of vtable entries in java.lang.Class.
- uint32_t vtable_entries = Object::kVTableLength + 71;
+ uint32_t vtable_entries = Object::kVTableLength + 72;
return ComputeClassSize(true, vtable_entries, 0, 0, 4, 1, 0, pointer_size);
}
@@ -582,31 +582,40 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- uint32_t GetObjectSize() SHARED_REQUIRES(Locks::mutator_lock_);
+ uint32_t GetObjectSize() REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset ObjectSizeOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, object_size_);
}
+ static MemberOffset ObjectSizeAllocFastPathOffset() {
+ return OFFSET_OF_OBJECT_MEMBER(Class, object_size_alloc_fast_path_);
+ }
- void SetObjectSize(uint32_t new_object_size) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetObjectSize(uint32_t new_object_size) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!IsVariableSize());
// Not called within a transaction.
return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size);
}
+ void SetObjectSizeAllocFastPath(uint32_t new_object_size) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ uint32_t GetObjectSizeAllocFastPath() REQUIRES_SHARED(Locks::mutator_lock_);
+
void SetObjectSizeWithoutChecks(uint32_t new_object_size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Not called within a transaction.
return SetField32<false, false, kVerifyNone>(
OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size);
}
// Returns true if this class is in the same packages as that class.
- bool IsInSamePackage(Class* that) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsInSamePackage(Class* that) REQUIRES_SHARED(Locks::mutator_lock_);
static bool IsInSamePackage(const StringPiece& descriptor1, const StringPiece& descriptor2);
// Returns true if this class can access that class.
- bool CanAccess(Class* that) SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool CanAccess(Class* that) REQUIRES_SHARED(Locks::mutator_lock_) {
return that->IsPublic() || this->IsInSamePackage(that);
}
@@ -614,7 +623,7 @@
// Note that access to the class isn't checked in case the declaring class is protected and the
// method has been exposed by a public sub-class
bool CanAccessMember(Class* access_to, uint32_t member_flags)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Classes can access all of their own members
if (this == access_to) {
return true;
@@ -642,40 +651,40 @@
// referenced by the FieldId in the DexFile in case the declaring class is inaccessible.
bool CanAccessResolvedField(Class* access_to, ArtField* field,
DexCache* dex_cache, uint32_t field_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool CheckResolvedFieldAccess(Class* access_to, ArtField* field,
uint32_t field_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can this class access a resolved method?
// Note that access to the method's class is checked and this may require looking up the class
// referenced by the MethodId in the DexFile in case the declaring class is inaccessible.
bool CanAccessResolvedMethod(Class* access_to, ArtMethod* resolved_method,
DexCache* dex_cache, uint32_t method_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <InvokeType throw_invoke_type>
bool CheckResolvedMethodAccess(Class* access_to, ArtMethod* resolved_method,
uint32_t method_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsSubClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsSubClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
// Can src be assigned to this class? For example, String can be assigned to Object (by an
// upcast), however, an Object cannot be assigned to a String as a potentially exception throwing
// downcast would be necessary. Similarly for interfaces, a class that implements (or an interface
// that extends) another can be assigned to its parent, but not vice-versa. All Classes may assign
// to themselves. Classes for primitive types may not assign to each other.
- ALWAYS_INLINE bool IsAssignableFrom(Class* src) SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE bool IsAssignableFrom(Class* src) REQUIRES_SHARED(Locks::mutator_lock_);
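A sketch of the assignability walk the comment above describes, reduced to the superclass chain (hypothetical SimpleClass with only a super pointer; real ART also has to handle interfaces, arrays and primitive types):

struct SimpleClass { const SimpleClass* super; };

bool IsAssignableFromLike(const SimpleClass* dest, const SimpleClass* src) {
  for (const SimpleClass* k = src; k != nullptr; k = k->super) {
    if (k == dest) {
      return true;   // src is dest or a subclass of dest: the upcast is safe
    }
  }
  return false;      // a downcast would be needed (or the types are unrelated)
}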
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ALWAYS_INLINE Class* GetSuperClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE Class* GetSuperClass() REQUIRES_SHARED(Locks::mutator_lock_);
// Get first common super class. It will never return null.
// `This` and `klass` must be classes.
- Class* GetCommonSuperClass(Handle<Class> klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ Class* GetCommonSuperClass(Handle<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
- void SetSuperClass(Class* new_super_class) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetSuperClass(Class* new_super_class) REQUIRES_SHARED(Locks::mutator_lock_) {
// Super class is assigned once, except during class linker initialization.
Class* old_super_class = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_));
DCHECK(old_super_class == nullptr || old_super_class == new_super_class);
@@ -683,7 +692,7 @@
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), new_super_class);
}
- bool HasSuperClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool HasSuperClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetSuperClass() != nullptr;
}
@@ -691,9 +700,9 @@
return MemberOffset(OFFSETOF_MEMBER(Class, super_class_));
}
- ClassLoader* GetClassLoader() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
+ ClassLoader* GetClassLoader() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
- void SetClassLoader(ClassLoader* new_cl) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetClassLoader(ClassLoader* new_cl) REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset DexCacheOffset() {
return MemberOffset(OFFSETOF_MEMBER(Class, dex_cache_));
@@ -705,125 +714,125 @@
kDumpClassInitialized = (1 << 2),
};
- void DumpClass(std::ostream& os, int flags) SHARED_REQUIRES(Locks::mutator_lock_);
+ void DumpClass(std::ostream& os, int flags) REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
+ DexCache* GetDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
// Also updates the dex_cache_strings_ variable from new_dex_cache.
- void SetDexCache(DexCache* new_dex_cache) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetDexCache(DexCache* new_dex_cache) REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDirectMethods(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE LengthPrefixedArray<ArtMethod>* GetMethodsPtr()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset MethodsOffset() {
return MemberOffset(OFFSETOF_MEMBER(Class, methods_));
}
ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetMethods(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetMethodsPtr(LengthPrefixedArray<ArtMethod>* new_methods,
uint32_t num_direct,
uint32_t num_virtual)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Used by image writer.
void SetMethodsPtrUnchecked(LengthPrefixedArray<ArtMethod>* new_methods,
uint32_t num_direct,
uint32_t num_virtual)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE ArraySlice<ArtMethod> GetDirectMethodsSlice(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetDirectMethod(size_t i, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Use only when we are allocating and populating the method arrays.
ALWAYS_INLINE ArtMethod* GetDirectMethodUnchecked(size_t i, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetVirtualMethodUnchecked(size_t i, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the number of static, private, and constructor methods.
- ALWAYS_INLINE uint32_t NumDirectMethods() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t NumDirectMethods() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE ArraySlice<ArtMethod> GetMethodsSlice(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredMethodsSlice(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDeclaredMethods(
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <PointerSize kPointerSize, bool kTransactionActive>
static Method* GetDeclaredMethodInternal(Thread* self,
mirror::Class* klass,
mirror::String* name,
mirror::ObjectArray<mirror::Class>* args)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <PointerSize kPointerSize, bool kTransactionActive>
static Constructor* GetDeclaredConstructorInternal(Thread* self,
mirror::Class* klass,
mirror::ObjectArray<mirror::Class>* args)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredVirtualMethodsSlice(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDeclaredVirtualMethods(
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE ArraySlice<ArtMethod> GetCopiedMethodsSlice(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetCopiedMethods(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE ArraySlice<ArtMethod> GetVirtualMethodsSlice(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetVirtualMethods(
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the number of non-inherited virtual methods (sum of declared and copied methods).
- ALWAYS_INLINE uint32_t NumVirtualMethods() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t NumVirtualMethods() REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the number of copied virtual methods.
- ALWAYS_INLINE uint32_t NumCopiedVirtualMethods() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t NumCopiedVirtualMethods() REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the number of declared virtual methods.
- ALWAYS_INLINE uint32_t NumDeclaredVirtualMethods() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t NumDeclaredVirtualMethods() REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE uint32_t NumMethods() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t NumMethods() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ArtMethod* GetVirtualMethod(size_t i, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* GetVirtualMethodDuringLinking(size_t i, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ALWAYS_INLINE PointerArray* GetVTable() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE PointerArray* GetVTable() REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE PointerArray* GetVTableDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE PointerArray* GetVTableDuringLinking() REQUIRES_SHARED(Locks::mutator_lock_);
- void SetVTable(PointerArray* new_vtable) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetVTable(PointerArray* new_vtable) REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset VTableOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, vtable_);
@@ -841,212 +850,212 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool ShouldHaveImt() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool ShouldHaveImt() REQUIRES_SHARED(Locks::mutator_lock_) {
return ShouldHaveEmbeddedVTable<kVerifyFlags, kReadBarrierOption>();
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool ShouldHaveEmbeddedVTable() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool ShouldHaveEmbeddedVTable() REQUIRES_SHARED(Locks::mutator_lock_) {
return IsInstantiable<kVerifyFlags, kReadBarrierOption>();
}
- bool HasVTable() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool HasVTable() REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset EmbeddedVTableEntryOffset(uint32_t i, PointerSize pointer_size);
- int32_t GetVTableLength() SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t GetVTableLength() REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* GetVTableEntry(uint32_t i, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- int32_t GetEmbeddedVTableLength() SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t GetEmbeddedVTableLength() REQUIRES_SHARED(Locks::mutator_lock_);
- void SetEmbeddedVTableLength(int32_t len) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetEmbeddedVTableLength(int32_t len) REQUIRES_SHARED(Locks::mutator_lock_);
- ImTable* GetImt(PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ ImTable* GetImt(PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_);
- void SetImt(ImTable* imt, PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetImt(ImTable* imt, PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* GetEmbeddedVTableEntry(uint32_t i, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
inline void SetEmbeddedVTableEntryUnchecked(uint32_t i,
ArtMethod* method,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void PopulateEmbeddedVTable(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Given a method implemented by this class but potentially from a super class, return the
// specific implementation method for this class.
ArtMethod* FindVirtualMethodForVirtual(ArtMethod* method, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Given a method implemented by this class' super class, return the specific implementation
// method for this class.
ArtMethod* FindVirtualMethodForSuper(ArtMethod* method, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Given a method from some implementor of this interface, return the specific implementation
// method for this class.
ArtMethod* FindVirtualMethodForInterfaceSuper(ArtMethod* method, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Given a method implemented by this class, but potentially from a
// super class or interface, return the specific implementation
// method for this class.
ArtMethod* FindVirtualMethodForInterface(ArtMethod* method, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE;
+ REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE;
ArtMethod* FindVirtualMethodForVirtualOrInterface(ArtMethod* method, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindInterfaceMethod(const StringPiece& name, const StringPiece& signature,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindInterfaceMethod(const StringPiece& name, const Signature& signature,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindDirectMethod(const StringPiece& name, const StringPiece& signature,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindDirectMethod(const StringPiece& name, const Signature& signature,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindDeclaredVirtualMethodByName(const StringPiece& name, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindDeclaredDirectMethodByName(const StringPiece& name, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindVirtualMethod(const StringPiece& name, const StringPiece& signature,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindVirtualMethod(const StringPiece& name, const Signature& signature,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindClassInitializer(PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod* FindClassInitializer(PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_);
- bool HasDefaultMethods() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool HasDefaultMethods() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccHasDefaultMethod) != 0;
}
- bool HasBeenRecursivelyInitialized() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool HasBeenRecursivelyInitialized() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccRecursivelyInitialized) != 0;
}
- ALWAYS_INLINE int32_t GetIfTableCount() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE int32_t GetIfTableCount() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ALWAYS_INLINE IfTable* GetIfTable() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE IfTable* GetIfTable() REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE void SetIfTable(IfTable* new_iftable) SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE void SetIfTable(IfTable* new_iftable) REQUIRES_SHARED(Locks::mutator_lock_);
// Get instance fields of the class (See also GetSFields).
- LengthPrefixedArray<ArtField>* GetIFieldsPtr() SHARED_REQUIRES(Locks::mutator_lock_);
+ LengthPrefixedArray<ArtField>* GetIFieldsPtr() REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE IterationRange<StrideIterator<ArtField>> GetIFields()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetIFieldsPtr(LengthPrefixedArray<ArtField>* new_ifields)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Unchecked edition has no verification flags.
void SetIFieldsPtrUnchecked(LengthPrefixedArray<ArtField>* new_sfields)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- uint32_t NumInstanceFields() SHARED_REQUIRES(Locks::mutator_lock_);
- ArtField* GetInstanceField(uint32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint32_t NumInstanceFields() REQUIRES_SHARED(Locks::mutator_lock_);
+ ArtField* GetInstanceField(uint32_t i) REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the number of instance fields containing reference types. Does not count fields in any
// super classes.
- uint32_t NumReferenceInstanceFields() SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t NumReferenceInstanceFields() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsResolved() || IsErroneous());
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_));
}
- uint32_t NumReferenceInstanceFieldsDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t NumReferenceInstanceFieldsDuringLinking() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsLoaded() || IsErroneous());
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_));
}
- void SetNumReferenceInstanceFields(uint32_t new_num) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetNumReferenceInstanceFields(uint32_t new_num) REQUIRES_SHARED(Locks::mutator_lock_) {
// Not called within a transaction.
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_), new_num);
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- uint32_t GetReferenceInstanceOffsets() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
+ uint32_t GetReferenceInstanceOffsets() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
void SetReferenceInstanceOffsets(uint32_t new_reference_offsets)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get the offset of the first reference instance field. Other reference instance fields follow.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
MemberOffset GetFirstReferenceInstanceFieldOffset()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the number of static fields containing reference types.
- uint32_t NumReferenceStaticFields() SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t NumReferenceStaticFields() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsResolved() || IsErroneous());
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_));
}
- uint32_t NumReferenceStaticFieldsDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t NumReferenceStaticFieldsDuringLinking() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsLoaded() || IsErroneous() || IsRetired());
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_));
}
- void SetNumReferenceStaticFields(uint32_t new_num) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetNumReferenceStaticFields(uint32_t new_num) REQUIRES_SHARED(Locks::mutator_lock_) {
// Not called within a transaction.
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_), new_num);
}
@@ -1055,53 +1064,53 @@
template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
MemberOffset GetFirstReferenceStaticFieldOffset(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get the offset of the first reference static field. Other reference static fields follow.
MemberOffset GetFirstReferenceStaticFieldOffsetDuringLinking(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Gets the static fields of the class.
- LengthPrefixedArray<ArtField>* GetSFieldsPtr() SHARED_REQUIRES(Locks::mutator_lock_);
+ LengthPrefixedArray<ArtField>* GetSFieldsPtr() REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE IterationRange<StrideIterator<ArtField>> GetSFields()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetSFieldsPtr(LengthPrefixedArray<ArtField>* new_sfields)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Unchecked edition has no verification flags.
void SetSFieldsPtrUnchecked(LengthPrefixedArray<ArtField>* new_sfields)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- uint32_t NumStaticFields() SHARED_REQUIRES(Locks::mutator_lock_);
+ uint32_t NumStaticFields() REQUIRES_SHARED(Locks::mutator_lock_);
// TODO: uint16_t
- ArtField* GetStaticField(uint32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtField* GetStaticField(uint32_t i) REQUIRES_SHARED(Locks::mutator_lock_);
// Find a static or instance field using the JLS resolution order
static ArtField* FindField(Thread* self, Handle<Class> klass, const StringPiece& name,
const StringPiece& type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Finds the given instance field in this class or a superclass.
ArtField* FindInstanceField(const StringPiece& name, const StringPiece& type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Finds the given instance field in this class or a superclass, only searches classes that
// have the same dex cache.
ArtField* FindInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtField* FindDeclaredInstanceField(const StringPiece& name, const StringPiece& type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtField* FindDeclaredInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Finds the given static field in this class or a superclass.
static ArtField* FindStaticField(Thread* self, Handle<Class> klass, const StringPiece& name,
const StringPiece& type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Finds the given static field in this class or superclass, only searches classes that
// have the same dex cache.
@@ -1109,122 +1118,122 @@
Class* klass,
const DexCache* dex_cache,
uint32_t dex_field_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtField* FindDeclaredStaticField(const StringPiece& name, const StringPiece& type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtField* FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex_field_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- pid_t GetClinitThreadId() SHARED_REQUIRES(Locks::mutator_lock_) {
+ pid_t GetClinitThreadId() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsIdxLoaded() || IsErroneous()) << PrettyClass(this);
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, clinit_thread_id_));
}
- void SetClinitThreadId(pid_t new_clinit_thread_id) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetClinitThreadId(pid_t new_clinit_thread_id) REQUIRES_SHARED(Locks::mutator_lock_);
- Object* GetVerifyError() SHARED_REQUIRES(Locks::mutator_lock_) {
+ Object* GetVerifyError() REQUIRES_SHARED(Locks::mutator_lock_) {
// DCHECK(IsErroneous());
return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_));
}
- uint16_t GetDexClassDefIndex() SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint16_t GetDexClassDefIndex() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_));
}
- void SetDexClassDefIndex(uint16_t class_def_idx) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetDexClassDefIndex(uint16_t class_def_idx) REQUIRES_SHARED(Locks::mutator_lock_) {
// Not called within a transaction.
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_), class_def_idx);
}
- uint16_t GetDexTypeIndex() SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint16_t GetDexTypeIndex() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_));
}
- void SetDexTypeIndex(uint16_t type_idx) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetDexTypeIndex(uint16_t type_idx) REQUIRES_SHARED(Locks::mutator_lock_) {
// Not called within a transaction.
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_), type_idx);
}
uint32_t FindTypeIndexInOtherDexFile(const DexFile& dex_file)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- static Class* GetJavaLangClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static Class* GetJavaLangClass() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(HasJavaLangClass());
return java_lang_Class_.Read();
}
- static bool HasJavaLangClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static bool HasJavaLangClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return !java_lang_Class_.IsNull();
}
// Can't call this SetClass or else gets called instead of Object::SetClass in places.
- static void SetClassClass(Class* java_lang_Class) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void SetClassClass(Class* java_lang_Class) REQUIRES_SHARED(Locks::mutator_lock_);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// VisitNativeRoots visits roots that are keyed off the native pointers, such as ArtFields and
// ArtMethods.
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, class Visitor>
void VisitNativeRoots(Visitor& visitor, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// When class is verified, set the kAccSkipAccessChecks flag on each method.
void SetSkipAccessChecksFlagOnAllMethods(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get the descriptor of the class. In a few cases a std::string is required; rather than
// always creating one, the storage argument is populated and its internal c_str() is returned.
// We do this to avoid memory allocation in the common case.
- const char* GetDescriptor(std::string* storage) SHARED_REQUIRES(Locks::mutator_lock_);
+ const char* GetDescriptor(std::string* storage) REQUIRES_SHARED(Locks::mutator_lock_);
- const char* GetArrayDescriptor(std::string* storage) SHARED_REQUIRES(Locks::mutator_lock_);
+ const char* GetArrayDescriptor(std::string* storage) REQUIRES_SHARED(Locks::mutator_lock_);
- bool DescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool DescriptorEquals(const char* match) REQUIRES_SHARED(Locks::mutator_lock_);
- const DexFile::ClassDef* GetClassDef() SHARED_REQUIRES(Locks::mutator_lock_);
+ const DexFile::ClassDef* GetClassDef() REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE uint32_t NumDirectInterfaces() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t NumDirectInterfaces() REQUIRES_SHARED(Locks::mutator_lock_);
- uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_);
static mirror::Class* GetDirectInterface(Thread* self, Handle<mirror::Class> klass,
uint32_t idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- const char* GetSourceFile() SHARED_REQUIRES(Locks::mutator_lock_);
+ const char* GetSourceFile() REQUIRES_SHARED(Locks::mutator_lock_);
- std::string GetLocation() SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string GetLocation() REQUIRES_SHARED(Locks::mutator_lock_);
- const DexFile& GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_);
+ const DexFile& GetDexFile() REQUIRES_SHARED(Locks::mutator_lock_);
- const DexFile::TypeList* GetInterfaceTypeList() SHARED_REQUIRES(Locks::mutator_lock_);
+ const DexFile::TypeList* GetInterfaceTypeList() REQUIRES_SHARED(Locks::mutator_lock_);
// Asserts we are initialized or initializing in the given thread.
void AssertInitializedOrInitializingInThread(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
Class* CopyOf(Thread* self, int32_t new_length, ImTable* imt,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// For proxy class only.
- ObjectArray<Class>* GetInterfaces() SHARED_REQUIRES(Locks::mutator_lock_);
+ ObjectArray<Class>* GetInterfaces() REQUIRES_SHARED(Locks::mutator_lock_);
// For proxy class only.
- ObjectArray<ObjectArray<Class>>* GetThrows() SHARED_REQUIRES(Locks::mutator_lock_);
+ ObjectArray<ObjectArray<Class>>* GetThrows() REQUIRES_SHARED(Locks::mutator_lock_);
// For reference class only.
- MemberOffset GetDisableIntrinsicFlagOffset() SHARED_REQUIRES(Locks::mutator_lock_);
- MemberOffset GetSlowPathFlagOffset() SHARED_REQUIRES(Locks::mutator_lock_);
- bool GetSlowPathEnabled() SHARED_REQUIRES(Locks::mutator_lock_);
- void SetSlowPath(bool enabled) SHARED_REQUIRES(Locks::mutator_lock_);
+ MemberOffset GetDisableIntrinsicFlagOffset() REQUIRES_SHARED(Locks::mutator_lock_);
+ MemberOffset GetSlowPathFlagOffset() REQUIRES_SHARED(Locks::mutator_lock_);
+ bool GetSlowPathEnabled() REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetSlowPath(bool enabled) REQUIRES_SHARED(Locks::mutator_lock_);
- StringDexCacheType* GetDexCacheStrings() SHARED_REQUIRES(Locks::mutator_lock_);
+ StringDexCacheType* GetDexCacheStrings() REQUIRES_SHARED(Locks::mutator_lock_);
void SetDexCacheStrings(StringDexCacheType* new_dex_cache_strings)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset DexCacheStringsOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_strings_);
}
@@ -1232,10 +1241,10 @@
// May cause thread suspension due to EqualParameters.
ArtMethod* GetDeclaredConstructor(
Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static int32_t GetInnerClassFlags(Handle<Class> h_this, int32_t default_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Used to initialize a class in the allocation code path to ensure it is guarded by a StoreStore
// fence.
@@ -1245,7 +1254,7 @@
}
void operator()(mirror::Object* obj, size_t usable_size) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
const uint32_t class_size_;
@@ -1254,7 +1263,7 @@
};
// Returns true if the class loader is null, i.e. the class loader is the bootstrap class loader.
- bool IsBootStrapClassLoaded() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsBootStrapClassLoaded() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetClassLoader() == nullptr;
}
@@ -1267,20 +1276,20 @@
}
ALWAYS_INLINE ArraySlice<ArtMethod> GetDirectMethodsSliceUnchecked(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE ArraySlice<ArtMethod> GetVirtualMethodsSliceUnchecked(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredMethodsSliceUnchecked(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredVirtualMethodsSliceUnchecked(
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE ArraySlice<ArtMethod> GetCopiedMethodsSliceUnchecked(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Fix up all of the native pointers in the class by running them through the visitor. Only sets
// the corresponding entry in dest if visitor(obj) != obj to prevent dirty memory. Dest should be
@@ -1290,47 +1299,47 @@
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
void FixupNativePointers(mirror::Class* dest, PointerSize pointer_size, const Visitor& visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
ALWAYS_INLINE void SetMethodsPtrInternal(LengthPrefixedArray<ArtMethod>* new_methods)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- void SetVerifyError(Object* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetVerifyError(Object* klass) REQUIRES_SHARED(Locks::mutator_lock_);
template <bool throw_on_failure, bool use_referrers_cache>
bool ResolvedFieldAccessTest(Class* access_to, ArtField* field,
uint32_t field_idx, DexCache* dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <bool throw_on_failure, bool use_referrers_cache, InvokeType throw_invoke_type>
bool ResolvedMethodAccessTest(Class* access_to, ArtMethod* resolved_method,
uint32_t method_idx, DexCache* dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- bool Implements(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsArrayAssignableFromArray(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsAssignableFromArray(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool Implements(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsArrayAssignableFromArray(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsAssignableFromArray(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
- void CheckObjectAlloc() SHARED_REQUIRES(Locks::mutator_lock_);
+ void CheckObjectAlloc() REQUIRES_SHARED(Locks::mutator_lock_);
// Unchecked editions are for root visiting.
- LengthPrefixedArray<ArtField>* GetSFieldsPtrUnchecked() SHARED_REQUIRES(Locks::mutator_lock_);
+ LengthPrefixedArray<ArtField>* GetSFieldsPtrUnchecked() REQUIRES_SHARED(Locks::mutator_lock_);
IterationRange<StrideIterator<ArtField>> GetSFieldsUnchecked()
- SHARED_REQUIRES(Locks::mutator_lock_);
- LengthPrefixedArray<ArtField>* GetIFieldsPtrUnchecked() SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ LengthPrefixedArray<ArtField>* GetIFieldsPtrUnchecked() REQUIRES_SHARED(Locks::mutator_lock_);
IterationRange<StrideIterator<ArtField>> GetIFieldsUnchecked()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// The index in the methods_ array where the first declared virtual method is.
- ALWAYS_INLINE uint32_t GetVirtualMethodsStartOffset() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetVirtualMethodsStartOffset() REQUIRES_SHARED(Locks::mutator_lock_);
// The index in the methods_ array where the first direct method is.
- ALWAYS_INLINE uint32_t GetDirectMethodsStartOffset() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetDirectMethodsStartOffset() REQUIRES_SHARED(Locks::mutator_lock_);
// The index in the methods_ array where the first copied method is.
- ALWAYS_INLINE uint32_t GetCopiedMethodsStartOffset() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetCopiedMethodsStartOffset() REQUIRES_SHARED(Locks::mutator_lock_);
- bool ProxyDescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool ProxyDescriptorEquals(const char* match) REQUIRES_SHARED(Locks::mutator_lock_);
// Check that the pointer size matches the one in the class linker.
ALWAYS_INLINE static void CheckPointerSize(PointerSize pointer_size);
@@ -1341,7 +1350,7 @@
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
void VisitReferences(mirror::Class* klass, const Visitor& visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// 'Class' Object Fields
// Order governed by java field ordering. See art::ClassLinker::LinkFields.
@@ -1457,6 +1466,10 @@
// See also class_size_.
uint32_t object_size_;
+ // Aligned object size for allocation fast path. The value is max uint32_t if the object is
+ // uninitialized or finalizable. Not currently used for variable sized objects.
+ uint32_t object_size_alloc_fast_path_;
+
// The lower 16 bits contains a Primitive::Type value. The upper 16
// bits contains the size shift of the primitive type.
uint32_t primitive_type_;
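The only semantic addition in this header is the object_size_alloc_fast_path_ field above. A minimal sketch (not from the patch) of how an allocation fast path could consume it, assuming a hypothetical accessor and hypothetical AllocSlowPath/AllocBytes helpers, with the surrounding ART headers in scope:

inline mirror::Object* AllocObjectFastPath(mirror::Class* klass)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // Hypothetical accessor for object_size_alloc_fast_path_.
  uint32_t size = klass->GetObjectSizeAllocFastPath();
  if (UNLIKELY(size == std::numeric_limits<uint32_t>::max())) {
    // Sentinel value: class is uninitialized or finalizable, take the slow path.
    return AllocSlowPath(klass);  // hypothetical helper
  }
  return AllocBytes(klass, size);  // hypothetical helper; size is already aligned
}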
diff --git a/runtime/mirror/class_loader.h b/runtime/mirror/class_loader.h
index 1957e13..407678a 100644
--- a/runtime/mirror/class_loader.h
+++ b/runtime/mirror/class_loader.h
@@ -36,26 +36,26 @@
return sizeof(ClassLoader);
}
- ClassLoader* GetParent() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ClassLoader* GetParent() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObject<ClassLoader>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, parent_));
}
- ClassTable* GetClassTable() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ClassTable* GetClassTable() REQUIRES_SHARED(Locks::mutator_lock_) {
return reinterpret_cast<ClassTable*>(
GetField64(OFFSET_OF_OBJECT_MEMBER(ClassLoader, class_table_)));
}
- void SetClassTable(ClassTable* class_table) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetClassTable(ClassTable* class_table) REQUIRES_SHARED(Locks::mutator_lock_) {
SetField64<false>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, class_table_),
reinterpret_cast<uint64_t>(class_table));
}
- LinearAlloc* GetAllocator() SHARED_REQUIRES(Locks::mutator_lock_) {
+ LinearAlloc* GetAllocator() REQUIRES_SHARED(Locks::mutator_lock_) {
return reinterpret_cast<LinearAlloc*>(
GetField64(OFFSET_OF_OBJECT_MEMBER(ClassLoader, allocator_)));
}
- void SetAllocator(LinearAlloc* allocator) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetAllocator(LinearAlloc* allocator) REQUIRES_SHARED(Locks::mutator_lock_) {
SetField64<false>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, allocator_),
reinterpret_cast<uint64_t>(allocator));
}
@@ -68,7 +68,7 @@
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
void VisitReferences(mirror::Class* klass, const Visitor& visitor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::classlinker_classes_lock_);
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
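As a small usage sketch (not part of the patch), the accessors above round-trip native pointers through 64-bit managed fields, so a caller can treat the ClassLoader object as the owner of its ClassTable and LinearAlloc:

// Illustrative only; assumes the surrounding ART headers are in scope.
void AttachNativeState(mirror::ClassLoader* loader,
                       ClassTable* table,
                       LinearAlloc* allocator)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  loader->SetClassTable(table);      // stored as a raw 64-bit field
  loader->SetAllocator(allocator);   // likewise
  DCHECK_EQ(loader->GetClassTable(), table);
  DCHECK_EQ(loader->GetAllocator(), allocator);
}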
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index a3071b7..220979a 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -44,13 +44,29 @@
inline void DexCache::SetResolvedString(uint32_t string_idx, mirror::String* resolved) {
DCHECK_LT(string_idx % NumStrings(), NumStrings());
- // TODO default transaction support.
- StringDexCachePair idx_ptr;
- idx_ptr.string_index = string_idx;
- idx_ptr.string_pointer = GcRoot<String>(resolved);
- GetStrings()[string_idx % NumStrings()].store(idx_ptr, std::memory_order_relaxed);
+ GetStrings()[string_idx % NumStrings()].store(
+ StringDexCachePair(resolved, string_idx),
+ std::memory_order_relaxed);
+ Runtime* const runtime = Runtime::Current();
+ if (UNLIKELY(runtime->IsActiveTransaction())) {
+ DCHECK(runtime->IsAotCompiler());
+ runtime->RecordResolveString(this, string_idx);
+ }
// TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this);
+ runtime->GetHeap()->WriteBarrierEveryFieldOf(this);
+}
+
+inline void DexCache::ClearString(uint32_t string_idx) {
+ const uint32_t slot_idx = string_idx % NumStrings();
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ StringDexCacheType* slot = &GetStrings()[slot_idx];
+ // This is racy but should only be called from the transactional interpreter.
+ if (slot->load(std::memory_order_relaxed).string_index == string_idx) {
+ StringDexCachePair cleared(
+ nullptr,
+ StringDexCachePair::InvalidStringIndexForSlot(slot_idx));
+ slot->store(cleared, std::memory_order_relaxed);
+ }
}
inline Class* DexCache::GetResolvedType(uint32_t type_idx) {
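The new transaction hook in SetResolvedString and the new ClearString are two halves of the same mechanism: the resolve is recorded while a transaction is active so it can be undone later. A hedged sketch of a rollback step (RecordResolveString is assumed to have logged the (dex_cache, string_idx) pair; this is not the actual transaction code):

void UndoStringResolution(mirror::DexCache* dex_cache, uint32_t string_idx)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(Runtime::Current()->IsAotCompiler());
  // Resets the slot to its per-slot invalid index so the string is not kept live.
  dex_cache->ClearString(string_idx);
}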
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 4ddfc7b..7d4021f 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -56,12 +56,20 @@
// it's always non-null if the string id branch succeeds (except for the 0th string id).
// Set the initial state for the 0th entry to be {0,1} which is guaranteed to fail
// the lookup string id == stored id branch.
+ StringDexCachePair(String* string, uint32_t string_idx)
+ : string_pointer(string),
+ string_index(string_idx) {}
+ StringDexCachePair() = default;
+ StringDexCachePair(const StringDexCachePair&) = default;
+ StringDexCachePair& operator=(const StringDexCachePair&) = default;
+
static void Initialize(StringDexCacheType* strings) {
mirror::StringDexCachePair first_elem;
first_elem.string_pointer = GcRoot<String>(nullptr);
- first_elem.string_index = 1;
+ first_elem.string_index = InvalidStringIndexForSlot(0);
strings[0].store(first_elem, std::memory_order_relaxed);
}
+
static GcRoot<String> LookupString(StringDexCacheType* dex_cache,
uint32_t string_idx,
uint32_t cache_size) {
@@ -71,10 +79,15 @@
DCHECK(!index_string.string_pointer.IsNull());
return index_string.string_pointer;
}
+
+ static uint32_t InvalidStringIndexForSlot(uint32_t slot) {
+ // Since the cache size is a power of two, 0 will always map to slot 0.
+ // Use 1 for slot 0 and 0 for all other slots.
+ return (slot == 0) ? 1u : 0u;
+ }
};
using StringDexCacheType = std::atomic<StringDexCachePair>;
-
// C++ mirror of java.lang.DexCache.
class MANAGED DexCache FINAL : public Object {
public:
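A worked example of the slot invariant that InvalidStringIndexForSlot encodes, assuming a power-of-two cache size (the 1024 below is illustrative only):

// With a 1024-entry cache, string indices 0 and 1024 both map to slot 0.
// Slot 0 therefore cannot use 0 as its "empty" marker and uses 1 instead
// (index 1 always maps to slot 1); every other slot can safely use 0,
// because index 0 only ever maps to slot 0.
constexpr uint32_t kCacheSize = 1024;  // must be a power of two
static_assert((kCacheSize & (kCacheSize - 1)) == 0, "power of two");
// A lookup hits only when the stored index matches the requested index:
//   slot = string_idx % kCacheSize;
//   hit  = (strings[slot].string_index == string_idx) && pointer is non-null.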
@@ -105,20 +118,20 @@
uint32_t num_resolved_methods,
ArtField** resolved_fields,
uint32_t num_resolved_fields,
- PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_);
void Fixup(ArtMethod* trampoline, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
void FixupStrings(StringDexCacheType* dest, const Visitor& visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
void FixupResolvedTypes(GcRoot<mirror::Class>* dest, const Visitor& visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- String* GetLocation() SHARED_REQUIRES(Locks::mutator_lock_) {
+ String* GetLocation() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_));
}
@@ -159,94 +172,98 @@
}
mirror::String* GetResolvedString(uint32_t string_idx) ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetResolvedString(uint32_t string_idx, mirror::String* resolved) ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- Class* GetResolvedType(uint32_t type_idx) SHARED_REQUIRES(Locks::mutator_lock_);
+ // Clear a string for a string_idx, used to undo string intern transactions to make sure
+ // the string isn't kept live.
+ void ClearString(uint32_t string_idx) REQUIRES_SHARED(Locks::mutator_lock_);
- void SetResolvedType(uint32_t type_idx, Class* resolved) SHARED_REQUIRES(Locks::mutator_lock_);
+ Class* GetResolvedType(uint32_t type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ void SetResolvedType(uint32_t type_idx, Class* resolved) REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx, PointerSize ptr_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE void SetResolvedMethod(uint32_t method_idx,
ArtMethod* resolved,
PointerSize ptr_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Pointer sized variant, used for patching.
ALWAYS_INLINE ArtField* GetResolvedField(uint32_t idx, PointerSize ptr_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Pointer sized variant, used for patching.
ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field, PointerSize ptr_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- StringDexCacheType* GetStrings() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ StringDexCacheType* GetStrings() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldPtr64<StringDexCacheType*>(StringsOffset());
}
- void SetStrings(StringDexCacheType* strings) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetStrings(StringDexCacheType* strings) ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldPtr<false>(StringsOffset(), strings);
}
- GcRoot<Class>* GetResolvedTypes() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ GcRoot<Class>* GetResolvedTypes() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldPtr<GcRoot<Class>*>(ResolvedTypesOffset());
}
void SetResolvedTypes(GcRoot<Class>* resolved_types)
ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldPtr<false>(ResolvedTypesOffset(), resolved_types);
}
- ArtMethod** GetResolvedMethods() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtMethod** GetResolvedMethods() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldPtr<ArtMethod**>(ResolvedMethodsOffset());
}
void SetResolvedMethods(ArtMethod** resolved_methods)
ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldPtr<false>(ResolvedMethodsOffset(), resolved_methods);
}
- ArtField** GetResolvedFields() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtField** GetResolvedFields() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldPtr<ArtField**>(ResolvedFieldsOffset());
}
void SetResolvedFields(ArtField** resolved_fields)
ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldPtr<false>(ResolvedFieldsOffset(), resolved_fields);
}
- size_t NumStrings() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t NumStrings() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32(NumStringsOffset());
}
- size_t NumResolvedTypes() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t NumResolvedTypes() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32(NumResolvedTypesOffset());
}
- size_t NumResolvedMethods() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t NumResolvedMethods() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32(NumResolvedMethodsOffset());
}
- size_t NumResolvedFields() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t NumResolvedFields() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32(NumResolvedFieldsOffset());
}
- const DexFile* GetDexFile() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ const DexFile* GetDexFile() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldPtr<const DexFile*>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_));
}
- void SetDexFile(const DexFile* dex_file) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetDexFile(const DexFile* dex_file) REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file);
}
- void SetLocation(mirror::String* location) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetLocation(mirror::String* location) REQUIRES_SHARED(Locks::mutator_lock_);
// NOTE: Get/SetElementPtrSize() are intended for working with ArtMethod** and ArtField**
// provided by GetResolvedMethods/Fields() and ArtMethod::GetDexCacheResolvedMethods(),
@@ -265,7 +282,7 @@
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
void VisitReferences(mirror::Class* klass, const Visitor& visitor)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
HeapReference<Object> dex_;
HeapReference<String> location_;
diff --git a/runtime/mirror/field.h b/runtime/mirror/field.h
index 93fd7f1..7eb9da4 100644
--- a/runtime/mirror/field.h
+++ b/runtime/mirror/field.h
@@ -37,66 +37,66 @@
// C++ mirror of java.lang.reflect.Field.
class MANAGED Field : public AccessibleObject {
public:
- static mirror::Class* StaticClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static mirror::Class* StaticClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return static_class_.Read();
}
- static mirror::Class* ArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static mirror::Class* ArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return array_class_.Read();
}
- ALWAYS_INLINE uint32_t GetDexFieldIndex() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE uint32_t GetDexFieldIndex() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, dex_field_index_));
}
- mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Class* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_));
}
- uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t GetAccessFlags() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, access_flags_));
}
- bool IsStatic() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsStatic() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccStatic) != 0;
}
- bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsFinal() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccFinal) != 0;
}
- bool IsVolatile() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsVolatile() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccVolatile) != 0;
}
ALWAYS_INLINE Primitive::Type GetTypeAsPrimitiveType()
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return GetType()->GetPrimitiveType();
}
- mirror::Class* GetType() SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Class* GetType() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObject<mirror::Class>(OFFSET_OF_OBJECT_MEMBER(Field, type_));
}
- int32_t GetOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
+ int32_t GetOffset() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, offset_));
}
- static void SetClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
- static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ static void SetClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
- static void SetArrayClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
- static void ResetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ static void SetArrayClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ static void ResetArrayClass() REQUIRES_SHARED(Locks::mutator_lock_);
- static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
// Slow, try to use only for PrettyField and such.
- ArtField* GetArtField() SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtField* GetArtField() REQUIRES_SHARED(Locks::mutator_lock_);
template <PointerSize kPointerSize, bool kTransactionActive = false>
static mirror::Field* CreateFromArtField(Thread* self, ArtField* field,
bool force_resolve)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
private:
HeapReference<mirror::Class> declaring_class_;
@@ -106,27 +106,27 @@
int32_t offset_;
template<bool kTransactionActive>
- void SetDeclaringClass(mirror::Class* c) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetDeclaringClass(mirror::Class* c) REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_), c);
}
template<bool kTransactionActive>
- void SetType(mirror::Class* type) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetType(mirror::Class* type) REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, type_), type);
}
template<bool kTransactionActive>
- void SetAccessFlags(uint32_t flags) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetAccessFlags(uint32_t flags) REQUIRES_SHARED(Locks::mutator_lock_) {
SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, access_flags_), flags);
}
template<bool kTransactionActive>
- void SetDexFieldIndex(uint32_t idx) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetDexFieldIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, dex_field_index_), idx);
}
template<bool kTransactionActive>
- void SetOffset(uint32_t offset) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetOffset(uint32_t offset) REQUIRES_SHARED(Locks::mutator_lock_) {
SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, offset_), offset);
}
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index d6571f2..a1a2f98 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -25,18 +25,18 @@
class MANAGED IfTable FINAL : public ObjectArray<Object> {
public:
- ALWAYS_INLINE Class* GetInterface(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE Class* GetInterface(int32_t i) REQUIRES_SHARED(Locks::mutator_lock_) {
Class* interface = GetWithoutChecks((i * kMax) + kInterface)->AsClass();
DCHECK(interface != nullptr);
return interface;
}
ALWAYS_INLINE void SetInterface(int32_t i, Class* interface)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- PointerArray* GetMethodArray(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_) {
+ PointerArray* GetMethodArray(int32_t i) REQUIRES_SHARED(Locks::mutator_lock_) {
auto* method_array = down_cast<PointerArray*>(Get<kVerifyFlags, kReadBarrierOption>(
(i * kMax) + kMethodArray));
DCHECK(method_array != nullptr);
@@ -45,20 +45,20 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- size_t GetMethodArrayCount(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t GetMethodArrayCount(int32_t i) REQUIRES_SHARED(Locks::mutator_lock_) {
auto* method_array = down_cast<PointerArray*>(
Get<kVerifyFlags, kReadBarrierOption>((i * kMax) + kMethodArray));
return method_array == nullptr ? 0u : method_array->GetLength();
}
- void SetMethodArray(int32_t i, PointerArray* arr) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetMethodArray(int32_t i, PointerArray* arr) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(arr != nullptr);
auto idx = i * kMax + kMethodArray;
DCHECK(Get(idx) == nullptr);
Set<false>(idx, arr);
}
- size_t Count() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t Count() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetLength() / kMax;
}
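The accessors above assume a flat pair layout: the iftable is an ObjectArray<Object> holding Count() (interface, method array) pairs stored with stride kMax. A hedged iteration sketch over that layout, assuming the usual PrettyClass() helper is available:

void DumpIfTable(mirror::IfTable* iftable) REQUIRES_SHARED(Locks::mutator_lock_) {
  for (size_t i = 0; i < iftable->Count(); ++i) {
    mirror::Class* interface = iftable->GetInterface(static_cast<int32_t>(i));
    size_t method_count = iftable->GetMethodArrayCount(static_cast<int32_t>(i));
    LOG(INFO) << PrettyClass(interface) << ": " << method_count << " methods";
  }
}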
diff --git a/runtime/mirror/method.h b/runtime/mirror/method.h
index be51784..6881991 100644
--- a/runtime/mirror/method.h
+++ b/runtime/mirror/method.h
@@ -30,25 +30,25 @@
public:
template <PointerSize kPointerSize, bool kTransactionActive>
static Method* CreateFromArtMethod(Thread* self, ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
- static mirror::Class* StaticClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static mirror::Class* StaticClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return static_class_.Read();
}
- static void SetClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void SetClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
- static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
- static mirror::Class* ArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static mirror::Class* ArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return array_class_.Read();
}
- static void SetArrayClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void SetArrayClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
- static void ResetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ static void ResetArrayClass() REQUIRES_SHARED(Locks::mutator_lock_);
- static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
private:
static GcRoot<Class> static_class_; // java.lang.reflect.Method.class.
@@ -62,25 +62,25 @@
public:
template <PointerSize kPointerSize, bool kTransactionActive>
static Constructor* CreateFromArtMethod(Thread* self, ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
- static mirror::Class* StaticClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static mirror::Class* StaticClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return static_class_.Read();
}
- static void SetClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void SetClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
- static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
- static mirror::Class* ArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static mirror::Class* ArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return array_class_.Read();
}
- static void SetArrayClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void SetArrayClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
- static void ResetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ static void ResetArrayClass() REQUIRES_SHARED(Locks::mutator_lock_);
- static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
private:
static GcRoot<Class> static_class_; // java.lang.reflect.Constructor.class.
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 27f8bd7..0f5cbb2 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -542,7 +542,7 @@
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
bool kIsVolatile>
inline void Object::SetFieldBoolean(MemberOffset field_offset, uint8_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
}
@@ -560,7 +560,7 @@
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
bool kIsVolatile>
inline void Object::SetFieldByte(MemberOffset field_offset, int8_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
}
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 13c536e..c37deb5 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -47,7 +47,7 @@
: dest_obj_(dest_obj) {}
void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
- ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
// GetFieldObject() contains a RB.
Object* ref = obj->GetFieldObject<Object>(offset);
// No WB here as a large object space does not have a card table
@@ -56,7 +56,7 @@
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
- ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
// Copy java.lang.ref.Reference.referent which isn't visited in
// Object::VisitReferences().
DCHECK(klass->IsTypeOfReferenceClass());
@@ -112,7 +112,7 @@
}
void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Object::CopyObject(self_, obj, orig_->Get(), num_bytes_);
}
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 8649294..262cb57 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -88,49 +88,49 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ALWAYS_INLINE Class* GetClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE Class* GetClass() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetClass(Class* new_klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetClass(Class* new_klass) REQUIRES_SHARED(Locks::mutator_lock_);
// TODO: Clean these up and change to return int32_t
- Object* GetReadBarrierPointer() SHARED_REQUIRES(Locks::mutator_lock_);
+ Object* GetReadBarrierPointer() REQUIRES_SHARED(Locks::mutator_lock_);
// Get the read barrier pointer with release semantics, only supported for baker.
- Object* GetReadBarrierPointerAcquire() SHARED_REQUIRES(Locks::mutator_lock_);
+ Object* GetReadBarrierPointerAcquire() REQUIRES_SHARED(Locks::mutator_lock_);
#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
NO_RETURN
#endif
- void SetReadBarrierPointer(Object* rb_ptr) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetReadBarrierPointer(Object* rb_ptr) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kCasRelease = false>
ALWAYS_INLINE bool AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE uint32_t GetMarkBit() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetMarkBit() REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE bool AtomicSetMarkBit(uint32_t expected_mark_bit, uint32_t mark_bit)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- void AssertReadBarrierPointer() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void AssertReadBarrierPointer() const REQUIRES_SHARED(Locks::mutator_lock_);
// The verifier treats all interfaces as java.lang.Object and relies on runtime checks in
// invoke-interface to detect incompatible interface types.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool VerifierInstanceOf(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool VerifierInstanceOf(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE bool InstanceOf(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE bool InstanceOf(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- size_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_);
+ size_t SizeOf() REQUIRES_SHARED(Locks::mutator_lock_);
- Object* Clone(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
+ Object* Clone(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
int32_t IdentityHashCode() const
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
static MemberOffset MonitorOffset() {
@@ -140,356 +140,356 @@
// As_volatile can be false if the mutators are suspended. This is an optimization since it
// avoids the barriers.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- LockWord GetLockWord(bool as_volatile) SHARED_REQUIRES(Locks::mutator_lock_);
+ LockWord GetLockWord(bool as_volatile) REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetLockWord(LockWord new_val, bool as_volatile) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetLockWord(LockWord new_val, bool as_volatile) REQUIRES_SHARED(Locks::mutator_lock_);
bool CasLockWordWeakSequentiallyConsistent(LockWord old_val, LockWord new_val)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool CasLockWordWeakRelaxed(LockWord old_val, LockWord new_val)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool CasLockWordWeakRelease(LockWord old_val, LockWord new_val)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t GetLockOwnerThreadId();
 // Try to enter the monitor; returns non-null if we succeeded.
mirror::Object* MonitorTryEnter(Thread* self)
EXCLUSIVE_LOCK_FUNCTION()
REQUIRES(!Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* MonitorEnter(Thread* self)
EXCLUSIVE_LOCK_FUNCTION()
REQUIRES(!Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool MonitorExit(Thread* self)
REQUIRES(!Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
UNLOCK_FUNCTION();
- void Notify(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
- void NotifyAll(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
- void Wait(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
- void Wait(Thread* self, int64_t timeout, int32_t nanos) SHARED_REQUIRES(Locks::mutator_lock_);
+ void Notify(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
+ void NotifyAll(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
+ void Wait(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
+ void Wait(Thread* self, int64_t timeout, int32_t nanos) REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsClass() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- Class* AsClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ Class* AsClass() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsObjectArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsObjectArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<class T,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ObjectArray<T>* AsObjectArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ ObjectArray<T>* AsObjectArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsClassLoader() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsClassLoader() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ClassLoader* AsClassLoader() SHARED_REQUIRES(Locks::mutator_lock_);
+ ClassLoader* AsClassLoader() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- DexCache* AsDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
+ DexCache* AsDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsArrayInstance() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsArrayInstance() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- Array* AsArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ Array* AsArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- BooleanArray* AsBooleanArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ BooleanArray* AsBooleanArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ByteArray* AsByteArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ ByteArray* AsByteArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ByteArray* AsByteSizedArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ ByteArray* AsByteSizedArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- CharArray* AsCharArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ CharArray* AsCharArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ShortArray* AsShortArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ ShortArray* AsShortArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ShortArray* AsShortSizedArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ ShortArray* AsShortSizedArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsIntArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsIntArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- IntArray* AsIntArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ IntArray* AsIntArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsLongArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsLongArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- LongArray* AsLongArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ LongArray* AsLongArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsFloatArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsFloatArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- FloatArray* AsFloatArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ FloatArray* AsFloatArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsDoubleArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsDoubleArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- DoubleArray* AsDoubleArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ DoubleArray* AsDoubleArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsString() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsString() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- String* AsString() SHARED_REQUIRES(Locks::mutator_lock_);
+ String* AsString() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- Throwable* AsThrowable() SHARED_REQUIRES(Locks::mutator_lock_);
+ Throwable* AsThrowable() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsReferenceInstance() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- Reference* AsReference() SHARED_REQUIRES(Locks::mutator_lock_);
+ Reference* AsReference() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsWeakReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsWeakReferenceInstance() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsSoftReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsSoftReferenceInstance() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsFinalizerReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsFinalizerReferenceInstance() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- FinalizerReference* AsFinalizerReference() SHARED_REQUIRES(Locks::mutator_lock_);
+ FinalizerReference* AsFinalizerReference() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPhantomReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsPhantomReferenceInstance() REQUIRES_SHARED(Locks::mutator_lock_);
// Accessor for Java type fields.
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier, bool kIsVolatile = false>
ALWAYS_INLINE T* GetFieldObject(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE T* GetFieldObjectVolatile(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset, Object* new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetFieldObject(MemberOffset field_offset, Object* new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetFieldObjectVolatile(MemberOffset field_offset, Object* new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldWeakSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value,
Object* new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset,
Object* old_value,
Object* new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value,
Object* new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset,
Object* old_value,
Object* new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldWeakRelaxedObjectWithoutWriteBarrier(MemberOffset field_offset,
Object* old_value,
Object* new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongRelaxedObjectWithoutWriteBarrier(MemberOffset field_offset,
Object* old_value,
Object* new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE uint8_t GetFieldBoolean(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE int8_t GetFieldByte(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE uint8_t GetFieldBooleanVolatile(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE int8_t GetFieldByteVolatile(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetFieldBoolean(MemberOffset field_offset, uint8_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetFieldByte(MemberOffset field_offset, int8_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetFieldBooleanVolatile(MemberOffset field_offset, uint8_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetFieldByteVolatile(MemberOffset field_offset, int8_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE uint16_t GetFieldChar(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE int16_t GetFieldShort(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE uint16_t GetFieldCharVolatile(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE int16_t GetFieldShortVolatile(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetFieldChar(MemberOffset field_offset, uint16_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetFieldShort(MemberOffset field_offset, int16_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetFieldCharVolatile(MemberOffset field_offset, uint16_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetFieldShortVolatile(MemberOffset field_offset, int16_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE int32_t GetField32(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE int32_t GetField32Volatile(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetField32(MemberOffset field_offset, int32_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetField32Volatile(MemberOffset field_offset, int32_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE bool CasFieldWeakSequentiallyConsistent32(MemberOffset field_offset,
int32_t old_value, int32_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldWeakRelaxed32(MemberOffset field_offset, int32_t old_value,
int32_t new_value) ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldWeakRelease32(MemberOffset field_offset, int32_t old_value,
int32_t new_value) ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset, int32_t old_value,
int32_t new_value) ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE int64_t GetField64(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE int64_t GetField64Volatile(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetField64(MemberOffset field_offset, int64_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetField64Volatile(MemberOffset field_offset, int64_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldWeakSequentiallyConsistent64(MemberOffset field_offset, int64_t old_value,
int64_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongSequentiallyConsistent64(MemberOffset field_offset, int64_t old_value,
int64_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
void SetFieldPtr(MemberOffset field_offset, T new_value)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldPtrWithSize<kTransactionActive, kCheckTransaction, kVerifyFlags>(
field_offset, new_value, kRuntimePointerSize);
}
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
void SetFieldPtr64(MemberOffset field_offset, T new_value)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldPtrWithSize<kTransactionActive, kCheckTransaction, kVerifyFlags>(
field_offset, new_value, 8u);
}
@@ -499,7 +499,7 @@
ALWAYS_INLINE void SetFieldPtrWithSize(MemberOffset field_offset,
T new_value,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (pointer_size == PointerSize::k32) {
intptr_t ptr = reinterpret_cast<intptr_t>(new_value);
 DCHECK_EQ(static_cast<int32_t>(ptr), ptr);  // Check that we don't lose any non-zero bits.
@@ -511,7 +511,7 @@
}
}
// TODO fix thread safety analysis broken by the use of template. This should be
- // SHARED_REQUIRES(Locks::mutator_lock_).
+ // REQUIRES_SHARED(Locks::mutator_lock_).
template <bool kVisitNativeRoots = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
@@ -520,7 +520,7 @@
void VisitReferences(const Visitor& visitor, const JavaLangRefVisitor& ref_visitor)
NO_THREAD_SAFETY_ANALYSIS;
- ArtField* FindFieldByOffset(MemberOffset offset) SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtField* FindFieldByOffset(MemberOffset offset) REQUIRES_SHARED(Locks::mutator_lock_);
// Used by object_test.
static void SetHashCodeSeed(uint32_t new_seed);
@@ -531,19 +531,19 @@
// Accessors for non-Java type fields
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
T GetFieldPtr(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset, kRuntimePointerSize);
}
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
T GetFieldPtr64(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset,
PointerSize::k64);
}
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE T GetFieldPtrWithSize(MemberOffset field_offset, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (pointer_size == PointerSize::k32) {
return reinterpret_cast<T>(GetField32<kVerifyFlags, kIsVolatile>(field_offset));
} else {
@@ -563,31 +563,31 @@
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
void VisitInstanceFieldsReferences(mirror::Class* klass, const Visitor& visitor) HOT_ATTR
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
void VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor) HOT_ATTR
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
template<typename kSize, bool kIsVolatile>
ALWAYS_INLINE void SetField(MemberOffset field_offset, kSize new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<typename kSize, bool kIsVolatile>
ALWAYS_INLINE kSize GetField(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get a field with acquire semantics.
template<typename kSize>
ALWAYS_INLINE kSize GetFieldAcquire(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Verify the type correctness of stores to fields.
// TODO: This can cause thread suspension and isn't moving GC safe.
void CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CheckFieldAssignment(MemberOffset field_offset, Object* new_value)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kCheckFieldAssignments) {
CheckFieldAssignmentImpl(field_offset, new_value);
}
@@ -598,7 +598,7 @@
// Class::CopyOf().
static Object* CopyObject(Thread* self, mirror::Object* dest, mirror::Object* src,
size_t num_bytes)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static Atomic<uint32_t> hash_code_seed;
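
The SetFieldPtrWithSize/GetFieldPtrWithSize helpers above store a native pointer into an object's field storage as either 32 or 64 bits, chosen by the image's PointerSize, and the 32-bit path checks that narrowing loses no bits. Below is a rough standalone sketch of that idea; the raw byte-buffer layout is an assumption for illustration, not ART's real field access path.

#include <cassert>
#include <cstdint>
#include <cstring>

enum class PointerSize { k32 = 4, k64 = 8 };

struct RawField {
  uint8_t storage[8];  // Big enough for either width.

  void SetPtr(const void* value, PointerSize size) {
    uintptr_t bits = reinterpret_cast<uintptr_t>(value);
    if (size == PointerSize::k32) {
      uint32_t narrow = static_cast<uint32_t>(bits);
      assert(static_cast<uintptr_t>(narrow) == bits);  // Must not lose non-zero bits.
      std::memcpy(storage, &narrow, sizeof(narrow));
    } else {
      uint64_t wide = bits;
      std::memcpy(storage, &wide, sizeof(wide));
    }
  }

  const void* GetPtr(PointerSize size) const {
    if (size == PointerSize::k32) {
      uint32_t narrow;
      std::memcpy(&narrow, storage, sizeof(narrow));
      return reinterpret_cast<const void*>(static_cast<uintptr_t>(narrow));
    }
    uint64_t wide;
    std::memcpy(&wide, storage, sizeof(wide));
    return reinterpret_cast<const void*>(static_cast<uintptr_t>(wide));
  }
};

int main() {
  static const int marker = 7;
  RawField field{};
  field.SetPtr(&marker, PointerSize::k64);
  assert(field.GetPtr(PointerSize::k64) == &marker);  // Round-trips at the stored width.
  return 0;
}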
diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h
index a99d616..19b9d87 100644
--- a/runtime/mirror/object_array.h
+++ b/runtime/mirror/object_array.h
@@ -32,23 +32,23 @@
static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ALWAYS_INLINE T* Get(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE T* Get(int32_t i) REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if the object can be stored into the array. If not, throws
// an ArrayStoreException and returns false.
- // TODO fix thread safety analysis: should be SHARED_REQUIRES(Locks::mutator_lock_).
+ // TODO fix thread safety analysis: should be REQUIRES_SHARED(Locks::mutator_lock_).
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CheckAssignable(T* object) NO_THREAD_SAFETY_ANALYSIS;
- ALWAYS_INLINE void Set(int32_t i, T* object) SHARED_REQUIRES(Locks::mutator_lock_);
- // TODO fix thread safety analysis: should be SHARED_REQUIRES(Locks::mutator_lock_).
+ ALWAYS_INLINE void Set(int32_t i, T* object) REQUIRES_SHARED(Locks::mutator_lock_);
+ // TODO fix thread safety analysis: should be REQUIRES_SHARED(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void Set(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
@@ -56,41 +56,41 @@
// Set element without bound and element type checks, to be used in limited
// circumstances, such as during boot image writing.
// TODO fix thread safety analysis broken by the use of template. This should be
- // SHARED_REQUIRES(Locks::mutator_lock_).
+ // REQUIRES_SHARED(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetWithoutChecks(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
// TODO fix thread safety analysis broken by the use of template. This should be
- // SHARED_REQUIRES(Locks::mutator_lock_).
+ // REQUIRES_SHARED(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetWithoutChecksAndWriteBarrier(int32_t i, T* object)
NO_THREAD_SAFETY_ANALYSIS;
- ALWAYS_INLINE T* GetWithoutChecks(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE T* GetWithoutChecks(int32_t i) REQUIRES_SHARED(Locks::mutator_lock_);
// Copy src into this array (dealing with overlaps as memmove does) without assignability checks.
void AssignableMemmove(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
- int32_t count) SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t count) REQUIRES_SHARED(Locks::mutator_lock_);
// Copy src into this array assuming no overlap and without assignability checks.
void AssignableMemcpy(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
- int32_t count) SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t count) REQUIRES_SHARED(Locks::mutator_lock_);
// Copy src into this array with assignability checks.
template<bool kTransactionActive>
void AssignableCheckingMemcpy(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
int32_t count, bool throw_exception)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ObjectArray<T>* CopyOf(Thread* self, int32_t new_length)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static MemberOffset OffsetOfElement(int32_t i);
private:
// TODO fix thread safety analysis broken by the use of template. This should be
- // SHARED_REQUIRES(Locks::mutator_lock_).
+ // REQUIRES_SHARED(Locks::mutator_lock_).
template<typename Visitor>
void VisitReferences(const Visitor& visitor) NO_THREAD_SAFETY_ANALYSIS;
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index 2a5c88e..583cfc3 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -33,11 +33,11 @@
template<bool kPoisonReferences, class MirrorType>
class MANAGED ObjectReference {
public:
- MirrorType* AsMirrorPtr() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ MirrorType* AsMirrorPtr() const REQUIRES_SHARED(Locks::mutator_lock_) {
return UnCompress();
}
- void Assign(MirrorType* other) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Assign(MirrorType* other) REQUIRES_SHARED(Locks::mutator_lock_) {
reference_ = Compress(other);
}
@@ -56,18 +56,18 @@
protected:
ObjectReference<kPoisonReferences, MirrorType>(MirrorType* mirror_ptr)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: reference_(Compress(mirror_ptr)) {
}
// Compress reference to its bit representation.
- static uint32_t Compress(MirrorType* mirror_ptr) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static uint32_t Compress(MirrorType* mirror_ptr) REQUIRES_SHARED(Locks::mutator_lock_) {
uintptr_t as_bits = reinterpret_cast<uintptr_t>(mirror_ptr);
return static_cast<uint32_t>(kPoisonReferences ? -as_bits : as_bits);
}
// Uncompress an encoded reference from its bit representation.
- MirrorType* UnCompress() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ MirrorType* UnCompress() const REQUIRES_SHARED(Locks::mutator_lock_) {
uintptr_t as_bits = kPoisonReferences ? -reference_ : reference_;
return reinterpret_cast<MirrorType*>(as_bits);
}
@@ -83,11 +83,11 @@
class MANAGED HeapReference : public ObjectReference<kPoisonHeapReferences, MirrorType> {
public:
static HeapReference<MirrorType> FromMirrorPtr(MirrorType* mirror_ptr)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return HeapReference<MirrorType>(mirror_ptr);
}
private:
- HeapReference<MirrorType>(MirrorType* mirror_ptr) SHARED_REQUIRES(Locks::mutator_lock_)
+ HeapReference<MirrorType>(MirrorType* mirror_ptr) REQUIRES_SHARED(Locks::mutator_lock_)
: ObjectReference<kPoisonHeapReferences, MirrorType>(mirror_ptr) {}
};
@@ -95,16 +95,16 @@
template<class MirrorType>
class MANAGED CompressedReference : public mirror::ObjectReference<false, MirrorType> {
public:
- CompressedReference<MirrorType>() SHARED_REQUIRES(Locks::mutator_lock_)
+ CompressedReference<MirrorType>() REQUIRES_SHARED(Locks::mutator_lock_)
: mirror::ObjectReference<false, MirrorType>(nullptr) {}
static CompressedReference<MirrorType> FromMirrorPtr(MirrorType* p)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return CompressedReference<MirrorType>(p);
}
private:
- CompressedReference<MirrorType>(MirrorType* p) SHARED_REQUIRES(Locks::mutator_lock_)
+ CompressedReference<MirrorType>(MirrorType* p) REQUIRES_SHARED(Locks::mutator_lock_)
: mirror::ObjectReference<false, MirrorType>(p) {}
};
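
ObjectReference above stores a mirror pointer in 32 bits and, when kPoisonReferences is set, negates the bits so that dereferencing the raw stored value faults immediately. The following self-contained sketch reproduces that compress/uncompress round trip with a hypothetical class (not ART's); as in UnCompress() above, the decode-side negation is deliberately done in 32-bit arithmetic.

#include <cassert>
#include <cstdint>

template <bool kPoison>
class CompressedRef {
 public:
  explicit CompressedRef(void* ptr) : bits_(Compress(ptr)) {}

  void* Decode() const {
    // Undo the poisoning in 32-bit arithmetic so the value round-trips.
    uint32_t raw = kPoison ? (0u - bits_) : bits_;
    return reinterpret_cast<void*>(static_cast<uintptr_t>(raw));
  }

 private:
  static uint32_t Compress(void* ptr) {
    uintptr_t as_bits = reinterpret_cast<uintptr_t>(ptr);
    return static_cast<uint32_t>(kPoison ? (0 - as_bits) : as_bits);
  }

  uint32_t bits_;  // 32-bit slot even on 64-bit hosts: the managed heap must fit below 4 GiB.
};

int main() {
  // A synthetic address that fits in 32 bits; it is never dereferenced.
  void* fake = reinterpret_cast<void*>(uintptr_t{0x12340});
  assert(CompressedRef<false>(fake).Decode() == fake);
  assert(CompressedRef<true>(fake).Decode() == fake);  // Round-trips through the poisoned form.
  return 0;
}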
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index b35a479..afd6115 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -49,7 +49,7 @@
const char* utf8_in,
const char* utf16_expected_le,
int32_t expected_hash)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::unique_ptr<uint16_t[]> utf16_expected(new uint16_t[expected_utf16_length]);
for (int32_t i = 0; i < expected_utf16_length; i++) {
uint16_t ch = (((utf16_expected_le[i*2 + 0] & 0xff) << 8) |
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 38c6616..6a8b32b 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -64,26 +64,26 @@
return OFFSET_OF_OBJECT_MEMBER(Reference, referent_);
}
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- Object* GetReferent() SHARED_REQUIRES(Locks::mutator_lock_) {
+ Object* GetReferent() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObjectVolatile<Object, kDefaultVerifyFlags, kReadBarrierOption>(
ReferentOffset());
}
template<bool kTransactionActive>
- void SetReferent(Object* referent) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetReferent(Object* referent) REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), referent);
}
template<bool kTransactionActive>
- void ClearReferent() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void ClearReferent() REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr);
}
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- Reference* GetPendingNext() SHARED_REQUIRES(Locks::mutator_lock_) {
+ Reference* GetPendingNext() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObject<Reference, kDefaultVerifyFlags, kReadBarrierOption>(PendingNextOffset());
}
void SetPendingNext(Reference* pending_next)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObject<true>(PendingNextOffset(), pending_next);
} else {
@@ -103,22 +103,22 @@
// should not be processed again until and unless the reference has been
// removed from the list after having determined the reference is not ready
// to be enqueued on a java ReferenceQueue.
- bool IsUnprocessed() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsUnprocessed() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetPendingNext<kWithoutReadBarrier>() == nullptr;
}
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- static Class* GetJavaLangRefReference() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static Class* GetJavaLangRefReference() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!java_lang_ref_Reference_.IsNull());
return java_lang_ref_Reference_.Read<kReadBarrierOption>();
}
static void SetClass(Class* klass);
static void ResetClass();
- static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
private:
// Note: This avoids a read barrier, it should only be used by the GC.
- HeapReference<Object>* GetReferentReferenceAddr() SHARED_REQUIRES(Locks::mutator_lock_) {
+ HeapReference<Object>* GetReferentReferenceAddr() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObjectReferenceAddr<kDefaultVerifyFlags>(ReferentOffset());
}
@@ -144,10 +144,10 @@
}
template<bool kTransactionActive>
- void SetZombie(Object* zombie) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetZombie(Object* zombie) REQUIRES_SHARED(Locks::mutator_lock_) {
return SetFieldObjectVolatile<kTransactionActive>(ZombieOffset(), zombie);
}
- Object* GetZombie() SHARED_REQUIRES(Locks::mutator_lock_) {
+ Object* GetZombie() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObjectVolatile<Object>(ZombieOffset());
}
diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h
index 1167391..4b3d9d0 100644
--- a/runtime/mirror/stack_trace_element.h
+++ b/runtime/mirror/stack_trace_element.h
@@ -31,32 +31,32 @@
// C++ mirror of java.lang.StackTraceElement
class MANAGED StackTraceElement FINAL : public Object {
public:
- String* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ String* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_));
}
- String* GetMethodName() SHARED_REQUIRES(Locks::mutator_lock_) {
+ String* GetMethodName() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, method_name_));
}
- String* GetFileName() SHARED_REQUIRES(Locks::mutator_lock_) {
+ String* GetFileName() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, file_name_));
}
- int32_t GetLineNumber() SHARED_REQUIRES(Locks::mutator_lock_) {
+ int32_t GetLineNumber() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_));
}
static StackTraceElement* Alloc(Thread* self, Handle<String> declaring_class,
Handle<String> method_name, Handle<String> file_name,
int32_t line_number)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static void SetClass(Class* java_lang_StackTraceElement);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
- static Class* GetStackTraceElement() SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ static Class* GetStackTraceElement() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!java_lang_StackTraceElement_.IsNull());
return java_lang_StackTraceElement_.Read();
}
@@ -71,7 +71,7 @@
template<bool kTransactionActive>
void Init(Handle<String> declaring_class, Handle<String> method_name, Handle<String> file_name,
int32_t line_number)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static GcRoot<Class> java_lang_StackTraceElement_;
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index bc39ea8..86e5139 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -44,7 +44,7 @@
}
void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Avoid AsString as object is not yet in live bitmap or allocation stack.
String* string = down_cast<String*>(obj);
string->SetCount(count_);
@@ -64,7 +64,7 @@
}
void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Avoid AsString as object is not yet in live bitmap or allocation stack.
String* string = down_cast<String*>(obj);
string->SetCount(count_);
@@ -100,7 +100,7 @@
}
void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Avoid AsString as object is not yet in live bitmap or allocation stack.
String* string = down_cast<String*>(obj);
string->SetCount(count_);
@@ -132,7 +132,7 @@
}
void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Avoid AsString as object is not yet in live bitmap or allocation stack.
String* string = down_cast<String*>(obj);
string->SetCount(count_);
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index 8695fe8..a18692f 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -53,100 +53,100 @@
return OFFSET_OF_OBJECT_MEMBER(String, value_);
}
- uint16_t* GetValue() SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint16_t* GetValue() REQUIRES_SHARED(Locks::mutator_lock_) {
return &value_[0];
}
- uint8_t* GetValueCompressed() SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint8_t* GetValueCompressed() REQUIRES_SHARED(Locks::mutator_lock_) {
return &value_compressed_[0];
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- size_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_);
+ size_t SizeOf() REQUIRES_SHARED(Locks::mutator_lock_);
 // Taking out the first/uppermost bit because it is not part of the actual length value
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- int32_t GetLength() SHARED_REQUIRES(Locks::mutator_lock_) {
+ int32_t GetLength() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetLengthFromCount(GetCount<kVerifyFlags>());
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- int32_t GetCount() SHARED_REQUIRES(Locks::mutator_lock_) {
+ int32_t GetCount() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(String, count_));
}
- void SetCount(int32_t new_count) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetCount(int32_t new_count) REQUIRES_SHARED(Locks::mutator_lock_) {
// Count is invariant so use non-transactional mode. Also disable check as we may run inside
// a transaction.
DCHECK_LE(0, (new_count & INT32_MAX));
SetField32<false, false>(OFFSET_OF_OBJECT_MEMBER(String, count_), new_count);
}
- int32_t GetHashCode() SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t GetHashCode() REQUIRES_SHARED(Locks::mutator_lock_);
// Computes, stores, and returns the hash code.
- int32_t ComputeHashCode() SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t ComputeHashCode() REQUIRES_SHARED(Locks::mutator_lock_);
- int32_t GetUtfLength() SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t GetUtfLength() REQUIRES_SHARED(Locks::mutator_lock_);
- uint16_t CharAt(int32_t index) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint16_t CharAt(int32_t index) REQUIRES_SHARED(Locks::mutator_lock_);
- void SetCharAt(int32_t index, uint16_t c) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetCharAt(int32_t index, uint16_t c) REQUIRES_SHARED(Locks::mutator_lock_);
- String* Intern() SHARED_REQUIRES(Locks::mutator_lock_);
+ String* Intern() REQUIRES_SHARED(Locks::mutator_lock_);
template <bool kIsInstrumented>
ALWAYS_INLINE static String* AllocFromByteArray(Thread* self, int32_t byte_length,
Handle<ByteArray> array, int32_t offset,
int32_t high_byte,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
template <bool kIsInstrumented>
ALWAYS_INLINE static String* AllocFromCharArray(Thread* self, int32_t count,
Handle<CharArray> array, int32_t offset,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
template <bool kIsInstrumented>
ALWAYS_INLINE static String* AllocFromString(Thread* self, int32_t string_length,
Handle<String> string, int32_t offset,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
template <bool kIsInstrumented>
ALWAYS_INLINE static String* AllocEmptyString(Thread* self,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static String* AllocFromStrings(Thread* self, Handle<String> string, Handle<String> string2)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static String* AllocFromUtf16(Thread* self, int32_t utf16_length, const uint16_t* utf16_data_in)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static String* AllocFromModifiedUtf8(Thread* self, const char* utf)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static String* AllocFromModifiedUtf8(Thread* self, int32_t utf16_length,
const char* utf8_data_in, int32_t utf8_length)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static String* AllocFromModifiedUtf8(Thread* self, int32_t utf16_length, const char* utf8_data_in)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// TODO: This is only used in the interpreter to compare against
 // entries from a dex file's constant pool (ArtField names). Should
// we unify this with Equals(const StringPiece&); ?
- bool Equals(const char* modified_utf8) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool Equals(const char* modified_utf8) REQUIRES_SHARED(Locks::mutator_lock_);
// TODO: This is only used to compare DexCache.location with
// a dex_file's location (which is an std::string). Do we really
// need this in mirror::String just for that one usage ?
bool Equals(const StringPiece& modified_utf8)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- bool Equals(String* that) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool Equals(String* that) REQUIRES_SHARED(Locks::mutator_lock_);
// Compare UTF-16 code point values not in a locale-sensitive manner
int Compare(int32_t utf16_length, const char* utf8_data_in);
@@ -154,31 +154,31 @@
// TODO: do we need this overload? give it a more intention-revealing name.
bool Equals(const uint16_t* that_chars, int32_t that_offset,
int32_t that_length)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Create a modified UTF-8 encoded std::string from a java/lang/String object.
- std::string ToModifiedUtf8() SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string ToModifiedUtf8() REQUIRES_SHARED(Locks::mutator_lock_);
- int32_t FastIndexOf(int32_t ch, int32_t start) SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t FastIndexOf(int32_t ch, int32_t start) REQUIRES_SHARED(Locks::mutator_lock_);
template <typename MemoryType>
int32_t FastIndexOf(MemoryType* chars, int32_t ch, int32_t start)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- int32_t CompareTo(String* other) SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t CompareTo(String* other) REQUIRES_SHARED(Locks::mutator_lock_);
- CharArray* ToCharArray(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
+ CharArray* ToCharArray(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
void GetChars(int32_t start, int32_t end, Handle<CharArray> array, int32_t index)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsCompressed() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsCompressed() REQUIRES_SHARED(Locks::mutator_lock_) {
return kUseStringCompression && GetCompressionFlagFromCount(GetCount());
}
- bool IsValueNull() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsValueNull() REQUIRES_SHARED(Locks::mutator_lock_);
template<typename MemoryType>
static bool AllASCII(const MemoryType* const chars, const int length);
@@ -195,17 +195,17 @@
return kUseStringCompression ? (count | (1u << 31)) : count;
}
- static Class* GetJavaLangString() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static Class* GetJavaLangString() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!java_lang_String_.IsNull());
return java_lang_String_.Read();
}
- static void SetClass(Class* java_lang_String) SHARED_REQUIRES(Locks::mutator_lock_);
- static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_);
- static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void SetClass(Class* java_lang_String) REQUIRES_SHARED(Locks::mutator_lock_);
+ static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void SetHashCode(int32_t new_hash_code) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetHashCode(int32_t new_hash_code) REQUIRES_SHARED(Locks::mutator_lock_) {
// Hash code is invariant so use non-transactional mode. Also disable check as we may run inside
// a transaction.
DCHECK_EQ(0, GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_)));
@@ -216,7 +216,7 @@
ALWAYS_INLINE static String* Alloc(Thread* self, int32_t utf16_length_with_flag,
gc::AllocatorType allocator_type,
const PreFenceVisitor& pre_fence_visitor)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
// First bit (uppermost/leftmost) is taken out for Compressed/Uncompressed flag
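The string.h hunk above shows IsCompressed() consulting a flag folded into the 32-bit count field, with the uppermost bit reserved for the compressed/uncompressed marker. As a minimal standalone sketch of that packing (helper names are illustrative, and the "set bit means compressed" polarity is an assumption inferred from the visible code, not confirmed by it):

#include <cstdint>

// Illustrative helpers only; ART's real versions live in mirror/string.h.
constexpr bool kUseStringCompression = true;
constexpr uint32_t kCompressionFlagBit = 1u << 31;  // uppermost bit of the count field

// Fold the compression flag into a character count (cf. the count | (1u << 31) above).
inline int32_t FlaggedCount(int32_t char_count) {
  uint32_t bits = static_cast<uint32_t>(char_count);
  return static_cast<int32_t>(kUseStringCompression ? (bits | kCompressionFlagBit) : bits);
}

// True when the flag bit is set (cf. IsCompressed above); assumes set bit == compressed.
inline bool CompressionFlagFromCount(int32_t count) {
  return kUseStringCompression &&
         (static_cast<uint32_t>(count) & kCompressionFlagBit) != 0;
}

// The plain character length with the flag bit masked off.
inline int32_t LengthFromCount(int32_t count) {
  return static_cast<int32_t>(static_cast<uint32_t>(count) & ~kCompressionFlagBit);
}

Packing the flag into the existing count field avoids growing the String object header; the cost is that every consumer must strip the flag bit before using the length.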
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index 0bccc8b..8f3ed84 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -54,7 +54,7 @@
}
}
-void Throwable::SetStackState(Object* state) SHARED_REQUIRES(Locks::mutator_lock_) {
+void Throwable::SetStackState(Object* state) REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(state != nullptr);
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObjectVolatile<true>(OFFSET_OF_OBJECT_MEMBER(Throwable, backtrace_), state);
diff --git a/runtime/mirror/throwable.h b/runtime/mirror/throwable.h
index 6aacc8d..76824cb 100644
--- a/runtime/mirror/throwable.h
+++ b/runtime/mirror/throwable.h
@@ -31,38 +31,38 @@
// C++ mirror of java.lang.Throwable
class MANAGED Throwable : public Object {
public:
- void SetDetailMessage(String* new_detail_message) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetDetailMessage(String* new_detail_message) REQUIRES_SHARED(Locks::mutator_lock_);
- String* GetDetailMessage() SHARED_REQUIRES(Locks::mutator_lock_) {
+ String* GetDetailMessage() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_));
}
- std::string Dump() SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() REQUIRES_SHARED(Locks::mutator_lock_);
// This is a runtime version of initCause, you shouldn't use it if initCause may have been
// overridden. Also it asserts rather than throwing exceptions. Currently this is only used
// in cases like the verifier where the checks cannot fail and initCause isn't overridden.
- void SetCause(Throwable* cause) SHARED_REQUIRES(Locks::mutator_lock_);
- void SetStackState(Object* state) SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsCheckedException() SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetCause(Throwable* cause) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetStackState(Object* state) REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsCheckedException() REQUIRES_SHARED(Locks::mutator_lock_);
- static Class* GetJavaLangThrowable() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static Class* GetJavaLangThrowable() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!java_lang_Throwable_.IsNull());
return java_lang_Throwable_.Read();
}
- int32_t GetStackDepth() SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t GetStackDepth() REQUIRES_SHARED(Locks::mutator_lock_);
static void SetClass(Class* java_lang_Throwable);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
- Object* GetStackState() SHARED_REQUIRES(Locks::mutator_lock_) {
+ Object* GetStackState() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObjectVolatile<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, backtrace_));
}
- Object* GetStackTrace() SHARED_REQUIRES(Locks::mutator_lock_) {
+ Object* GetStackTrace() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObjectVolatile<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, backtrace_));
}
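Most of this change is the mechanical rename of the SHARED_REQUIRES annotation macro to REQUIRES_SHARED, matching the vocabulary of Clang's thread safety analysis. As a hedged sketch of how such macros are conventionally defined (this mirrors the pattern from Clang's thread-safety documentation, not ART's actual base/mutex.h):

#if defined(__clang__)
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE__(x)  // analysis unavailable: annotations compile away
#endif

// Caller must already hold the named capability (lock) in shared/reader mode.
#define REQUIRES_SHARED(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))

// Caller must hold the capability exclusively; "!lock" means it must NOT be held.
#define REQUIRES(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))

With that spelling, a declaration such as bool IsLocked() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!monitor_lock_); reads as: callers hold the mutator lock for reading and must not already hold the monitor lock.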
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index e863ea9..22cc197 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -219,13 +219,13 @@
struct NthCallerWithDexPcVisitor FINAL : public StackVisitor {
explicit NthCallerWithDexPcVisitor(Thread* thread, size_t frame)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
method_(nullptr),
dex_pc_(0),
current_frame_number_(0),
wanted_frame_number_(frame) {}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m == nullptr || m->IsRuntimeMethod()) {
// Runtime method, upcall, or resolution issue. Skip.
@@ -449,7 +449,7 @@
__attribute__((format(printf, 1, 2)));
static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
va_list args;
va_start(args, fmt);
Thread* self = Thread::Current();
@@ -1261,7 +1261,7 @@
}
}
-bool Monitor::IsLocked() SHARED_REQUIRES(Locks::mutator_lock_) {
+bool Monitor::IsLocked() REQUIRES_SHARED(Locks::mutator_lock_) {
MutexLock mu(Thread::Current(), monitor_lock_);
return owner_ != nullptr;
}
@@ -1364,7 +1364,7 @@
MonitorDeflateVisitor() : self_(Thread::Current()), deflate_count_(0) {}
virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (Monitor::Deflate(self_, object)) {
DCHECK_NE(object->GetLockWord(true).GetState(), LockWord::kFatLocked);
++deflate_count_;
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 1d829e1..c3da563 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -66,19 +66,19 @@
EXCLUSIVE_LOCK_FUNCTION(obj)
NO_THREAD_SAFETY_ANALYSIS
REQUIRES(!Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// NO_THREAD_SAFETY_ANALYSIS for mon->Unlock.
static bool MonitorExit(Thread* thread, mirror::Object* obj)
NO_THREAD_SAFETY_ANALYSIS
REQUIRES(!Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
UNLOCK_FUNCTION(obj);
- static void Notify(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static void Notify(Thread* self, mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
DoNotify(self, obj, false);
}
- static void NotifyAll(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static void NotifyAll(Thread* self, mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
DoNotify(self, obj, true);
}
@@ -86,15 +86,15 @@
// NO_THREAD_SAFETY_ANALYSIS for mon->Wait.
static void Wait(Thread* self, mirror::Object* obj, int64_t ms, int32_t ns,
bool interruptShouldThrow, ThreadState why)
- SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
+ REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
static void DescribeWait(std::ostream& os, const Thread* thread)
REQUIRES(!Locks::thread_suspend_count_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Used to implement JDWP's ThreadReference.CurrentContendedMonitor.
static mirror::Object* GetContendedMonitor(Thread* thread)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Calls 'callback' once for each lock held in the single stack frame represented by
// the current state of 'stack_visitor'.
@@ -102,12 +102,12 @@
// is necessary when we have already aborted but want to dump the stack as much as we can.
static void VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*),
void* callback_context, bool abort_on_failure = true)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool IsValidLockWord(LockWord lock_word);
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- mirror::Object* GetObject() SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Object* GetObject() REQUIRES_SHARED(Locks::mutator_lock_) {
return obj_.Read<kReadBarrierOption>();
}
@@ -119,7 +119,7 @@
int32_t GetHashCode();
- bool IsLocked() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!monitor_lock_);
+ bool IsLocked() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!monitor_lock_);
bool HasHashCode() const {
return hash_code_.LoadRelaxed() != 0;
@@ -131,13 +131,13 @@
// Inflate the lock on obj. May fail to inflate for spurious reasons, always re-check.
static void InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWord lock_word,
- uint32_t hash_code) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint32_t hash_code) REQUIRES_SHARED(Locks::mutator_lock_);
// Not exclusive because ImageWriter calls this during a Heap::VisitObjects() that
// does not allow a thread suspension in the middle. TODO: maybe make this exclusive.
// NO_THREAD_SAFETY_ANALYSIS for monitor->monitor_lock_.
static bool Deflate(Thread* self, mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
+ REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
#ifndef __LP64__
void* operator new(size_t size) {
@@ -155,15 +155,15 @@
private:
Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code, MonitorId id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Install the monitor into its object, may fail if another thread installs a different monitor
// first.
bool Install(Thread* self)
REQUIRES(!monitor_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Links a thread into a monitor's wait set. The monitor lock must be held by the caller of this
// routine.
@@ -178,12 +178,12 @@
// threads inflating the lock, installing hash codes and spurious failures. The caller should
// re-read the lock word following the call.
static void Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
NO_THREAD_SAFETY_ANALYSIS; // For m->Install(self)
void LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent,
const char* owner_filename, int32_t owner_line_number)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void FailedUnlock(mirror::Object* obj,
uint32_t expected_owner_thread_id,
@@ -191,34 +191,34 @@
Monitor* mon)
REQUIRES(!Locks::thread_list_lock_,
!monitor_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Try to lock without blocking, returns true if we acquired the lock.
bool TryLock(Thread* self)
REQUIRES(!monitor_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Variant for already holding the monitor lock.
bool TryLockLocked(Thread* self)
REQUIRES(monitor_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void Lock(Thread* self)
REQUIRES(!monitor_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool Unlock(Thread* thread)
REQUIRES(!monitor_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void DoNotify(Thread* self, mirror::Object* obj, bool notify_all)
- SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS; // For mon->Notify.
+ REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS; // For mon->Notify.
void Notify(Thread* self)
REQUIRES(!monitor_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void NotifyAll(Thread* self)
REQUIRES(!monitor_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static std::string PrettyContentionInfo(const std::string& owner_name,
pid_t owner_tid,
@@ -226,7 +226,7 @@
uint32_t owners_dex_pc,
size_t num_waiters)
REQUIRES(!Locks::thread_list_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Wait on a monitor until timeout, interrupt, or notification. Used for Object.wait() and
// (somewhat indirectly) Thread.sleep() and Thread.join().
@@ -249,13 +249,13 @@
// of the 32-bit time epoch.
void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow, ThreadState why)
REQUIRES(!monitor_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Translates the provided method and pc into its declaring class' source file and line number.
static void TranslateLocation(ArtMethod* method, uint32_t pc,
const char** source_file,
int32_t* line_number)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t GetOwnerThreadId() REQUIRES(!monitor_lock_);
@@ -263,11 +263,11 @@
ALWAYS_INLINE static void AtraceMonitorLock(Thread* self,
mirror::Object* obj,
bool is_wait)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void AtraceMonitorLockImpl(Thread* self,
mirror::Object* obj,
bool is_wait)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE static void AtraceMonitorUnlock();
static uint32_t lock_profiling_threshold_;
@@ -322,10 +322,10 @@
MonitorList();
~MonitorList();
- void Add(Monitor* m) SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!monitor_list_lock_);
+ void Add(Monitor* m) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!monitor_list_lock_);
void SweepMonitorList(IsMarkedVisitor* visitor)
- REQUIRES(!monitor_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!monitor_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void DisallowNewMonitors() REQUIRES(!monitor_list_lock_);
void AllowNewMonitors() REQUIRES(!monitor_list_lock_);
void BroadcastForNewMonitors() REQUIRES(!monitor_list_lock_);
diff --git a/runtime/monitor_pool.cc b/runtime/monitor_pool.cc
index a47a4b2..0f4e238 100644
--- a/runtime/monitor_pool.cc
+++ b/runtime/monitor_pool.cc
@@ -107,7 +107,7 @@
Monitor* MonitorPool::CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj,
int32_t hash_code)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// We are gonna allocate, so acquire the writer lock.
MutexLock mu(self, *Locks::allocated_monitor_ids_lock_);
diff --git a/runtime/monitor_pool.h b/runtime/monitor_pool.h
index 99810e0..80bae7f 100644
--- a/runtime/monitor_pool.h
+++ b/runtime/monitor_pool.h
@@ -43,7 +43,7 @@
}
static Monitor* CreateMonitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#ifndef __LP64__
Monitor* mon = new Monitor(self, owner, obj, hash_code);
DCHECK_ALIGNED(mon, LockWord::kMonitorIdAlignment);
@@ -123,7 +123,7 @@
void FreeInternal() NO_THREAD_SAFETY_ANALYSIS;
Monitor* CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ReleaseMonitorToPool(Thread* self, Monitor* monitor);
void ReleaseMonitorsToPool(Thread* self, MonitorList::Monitors* monitors);
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index 48d256c..ac6a4f3 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -61,7 +61,7 @@
static void FillHeap(Thread* self, ClassLinker* class_linker,
std::unique_ptr<StackHandleScope<kMaxHandles>>* hsp,
std::vector<MutableHandle<mirror::Object>>* handles)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Runtime::Current()->GetHeap()->SetIdealFootprint(1 * GB);
hsp->reset(new StackHandleScope<kMaxHandles>(self));
diff --git a/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.cc b/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.cc
new file mode 100644
index 0000000..1761799
--- /dev/null
+++ b/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.cc
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dalvik_system_InMemoryDexClassLoader_DexData.h"
+
+#include "class_linker.h"
+#include "common_throws.h"
+#include "dex_file.h"
+#include "jni_internal.h"
+#include "mem_map.h"
+#include "mirror/class_loader.h"
+#include "mirror/object-inl.h"
+#include "scoped_thread_state_change.h"
+#include "ScopedUtfChars.h"
+
+namespace art {
+
+static std::unique_ptr<MemMap> AllocateDexMemoryMap(JNIEnv* env, jint start, jint end) {
+ if (end <= start) {
+ ScopedObjectAccess soa(env);
+ ThrowWrappedIOException("Bad range");
+ return nullptr;
+ }
+
+ std::string error_message;
+ size_t length = static_cast<size_t>(end - start);
+ std::unique_ptr<MemMap> dex_mem_map(MemMap::MapAnonymous("DEX data",
+ nullptr,
+ length,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_message));
+ if (dex_mem_map == nullptr) {
+ ScopedObjectAccess soa(env);
+ ThrowWrappedIOException("%s", error_message.c_str());
+ }
+ return dex_mem_map;
+}
+
+static jlong DexFileToCookie(const DexFile* dex_file) {
+ return reinterpret_cast<jlong>(dex_file);
+}
+
+static const DexFile* CookieToDexFile(jlong cookie) {
+ return reinterpret_cast<const DexFile*>(cookie);
+}
+
+static const DexFile* CreateDexFile(JNIEnv* env, std::unique_ptr<MemMap> dex_mem_map) {
+ std::string location = StringPrintf("InMemoryDexClassLoader_DexData@%p-%p",
+ dex_mem_map->Begin(),
+ dex_mem_map->End());
+ std::string error_message;
+ std::unique_ptr<const DexFile> dex_file(DexFile::Open(location,
+ 0,
+ std::move(dex_mem_map),
+ /* verify */ true,
+ /* verify_location */ true,
+ &error_message));
+ if (dex_file == nullptr) {
+ ScopedObjectAccess soa(env);
+ ThrowWrappedIOException("%s", error_message.c_str());
+ return nullptr;
+ }
+
+ if (!dex_file->DisableWrite()) {
+ ScopedObjectAccess soa(env);
+ ThrowWrappedIOException("Failed to make dex file read-only");
+ return nullptr;
+ }
+
+ return dex_file.release();
+}
+
+static jlong InMemoryDexClassLoader_DexData_initializeWithDirectBuffer(
+ JNIEnv* env, jclass, jobject buffer, jint start, jint end) {
+ uint8_t* base_address = reinterpret_cast<uint8_t*>(env->GetDirectBufferAddress(buffer));
+ if (base_address == nullptr) {
+ ScopedObjectAccess soa(env);
+ ThrowWrappedIOException("dexFileBuffer not direct");
+ return 0;
+ }
+
+ std::unique_ptr<MemMap> dex_mem_map(AllocateDexMemoryMap(env, start, end));
+ if (dex_mem_map == nullptr) {
+ DCHECK(Thread::Current()->IsExceptionPending());
+ return 0;
+ }
+
+ size_t length = static_cast<size_t>(end - start);
+ memcpy(dex_mem_map->Begin(), base_address, length);
+ return DexFileToCookie(CreateDexFile(env, std::move(dex_mem_map)));
+}
+
+static jlong InMemoryDexClassLoader_DexData_initializeWithArray(
+ JNIEnv* env, jclass, jbyteArray buffer, jint start, jint end) {
+ std::unique_ptr<MemMap> dex_mem_map(AllocateDexMemoryMap(env, start, end));
+ if (dex_mem_map == nullptr) {
+ DCHECK(Thread::Current()->IsExceptionPending());
+ return 0;
+ }
+
+ auto destination = reinterpret_cast<jbyte*>(dex_mem_map.get()->Begin());
+ env->GetByteArrayRegion(buffer, start, end - start, destination);
+ return DexFileToCookie(CreateDexFile(env, std::move(dex_mem_map)));
+}
+
+static void InMemoryDexClassLoader_DexData_uninitialize(JNIEnv* env, jclass, jlong cookie) {
+ const DexFile* dex_file = CookieToDexFile(cookie);
+ if (kIsDebugBuild) {
+ ScopedObjectAccess soa(env);
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ CHECK(class_linker->FindDexCache(soa.Self(), *dex_file, true) == nullptr);
+ }
+ delete dex_file;
+}
+
+static jclass InMemoryDexClassLoader_DexData_findClass(
+ JNIEnv* env, jobject dexData, jstring name, jobject loader, jlong cookie) {
+ ScopedUtfChars scoped_class_name(env, name);
+ if (env->ExceptionCheck()) {
+ return nullptr;
+ }
+
+ const char* class_name = scoped_class_name.c_str();
+ const std::string descriptor(DotToDescriptor(class_name));
+ const char* class_descriptor = descriptor.c_str();
+ const size_t hash = ComputeModifiedUtf8Hash(class_descriptor);
+ const DexFile* dex_file = CookieToDexFile(cookie);
+ const DexFile::ClassDef* dex_class_def = dex_file->FindClassDef(class_descriptor, hash);
+ if (dex_class_def != nullptr) {
+ ScopedObjectAccess soa(env);
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ StackHandleScope<1> handle_scope(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ handle_scope.NewHandle(soa.Decode<mirror::ClassLoader*>(loader)));
+ class_linker->RegisterDexFile(*dex_file, class_loader.Get());
+ mirror::Class* result = class_linker->DefineClass(
+ soa.Self(), class_descriptor, hash, class_loader, *dex_file, *dex_class_def);
+ if (result != nullptr) {
+ // Ensure the class table has a strong reference to the
+ // InMemoryClassLoader/DexData instance now that a class has
+ // been loaded.
+ class_linker->InsertDexFileInToClassLoader(
+ soa.Decode<mirror::Object*>(dexData), class_loader.Get());
+ return soa.AddLocalReference<jclass>(result);
+ }
+ }
+
+ VLOG(class_linker) << "Failed to find dex_class_def " << class_name;
+ return nullptr;
+}
+
+static JNINativeMethod gMethods[] = {
+ NATIVE_METHOD(InMemoryDexClassLoader_DexData,
+ initializeWithDirectBuffer,
+ "(Ljava/nio/ByteBuffer;II)J"),
+ NATIVE_METHOD(InMemoryDexClassLoader_DexData, initializeWithArray, "([BII)J"),
+ NATIVE_METHOD(InMemoryDexClassLoader_DexData, uninitialize, "(J)V"),
+ NATIVE_METHOD(InMemoryDexClassLoader_DexData,
+ findClass,
+ "(Ljava/lang/String;Ljava/lang/ClassLoader;J)Ljava/lang/Class;"),
+};
+
+void register_dalvik_system_InMemoryDexClassLoader_DexData(JNIEnv* env) {
+ REGISTER_NATIVE_METHODS("dalvik/system/InMemoryDexClassLoader$DexData");
+}
+
+} // namespace art
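The new native file above hands the freshly created DexFile back to Java as a jlong "cookie" (DexFileToCookie / CookieToDexFile) so managed code can hold the native pointer and pass it back to later native calls. A minimal standalone sketch of that round-trip, with an illustrative payload type standing in for the real DexFile:

#include <jni.h>

struct NativeDexData { /* opaque native state, stands in for const DexFile */ };

// Store the native pointer in a jlong so Java code can carry it as a plain long.
static jlong ToCookie(const NativeDexData* data) {
  return reinterpret_cast<jlong>(data);
}

// Recover the pointer when a native method receives the cookie back from Java.
static const NativeDexData* FromCookie(jlong cookie) {
  return reinterpret_cast<const NativeDexData*>(cookie);
}

The uninitialize entry point above is then essentially CookieToDexFile plus delete, after a debug-build check that no dex cache still refers to the file.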
diff --git a/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.h b/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.h
new file mode 100644
index 0000000..f73d07a
--- /dev/null
+++ b/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_DALVIK_SYSTEM_INMEMORYDEXCLASSLOADER_DEXDATA_H_
+#define ART_RUNTIME_NATIVE_DALVIK_SYSTEM_INMEMORYDEXCLASSLOADER_DEXDATA_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_dalvik_system_InMemoryDexClassLoader_DexData(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_DALVIK_SYSTEM_INMEMORYDEXCLASSLOADER_DEXDATA_H_
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 45e49e2..d88c9d4 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -270,7 +270,7 @@
explicit PreloadDexCachesStringsVisitor(StringTable* table) : table_(table) { }
void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::String* string = root->AsString();
table_->operator[](string->ToModifiedUtf8()) = string;
}
@@ -282,7 +282,7 @@
// Based on ClassLinker::ResolveString.
static void PreloadDexCachesResolveString(
Handle<mirror::DexCache> dex_cache, uint32_t string_idx, StringTable& strings)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::String* string = dex_cache->GetResolvedString(string_idx);
if (string != nullptr) {
return;
@@ -300,7 +300,7 @@
// Based on ClassLinker::ResolveType.
static void PreloadDexCachesResolveType(
Thread* self, mirror::DexCache* dex_cache, uint32_t type_idx)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
if (klass != nullptr) {
return;
@@ -329,7 +329,7 @@
// Based on ClassLinker::ResolveField.
static void PreloadDexCachesResolveField(Handle<mirror::DexCache> dex_cache, uint32_t field_idx,
bool is_static)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* field = dex_cache->GetResolvedField(field_idx, kRuntimePointerSize);
if (field != nullptr) {
return;
@@ -357,7 +357,7 @@
// Based on ClassLinker::ResolveMethod.
static void PreloadDexCachesResolveMethod(Handle<mirror::DexCache> dex_cache, uint32_t method_idx,
InvokeType invoke_type)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = dex_cache->GetResolvedMethod(method_idx, kRuntimePointerSize);
if (method != nullptr) {
return;
@@ -431,7 +431,7 @@
}
static void PreloadDexCachesStatsFilled(DexCacheStats* filled)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!kPreloadDexCachesCollectStats) {
return;
}
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index 9e12806..9da40b9 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -29,7 +29,7 @@
namespace art {
static jobject GetThreadStack(const ScopedFastNativeObjectAccess& soa, jobject peer)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
jobject trace = nullptr;
if (soa.Decode<mirror::Object*>(peer) == soa.Self()->GetPeer()) {
trace = soa.Self()->CreateInternalStackTrace<false>(soa);
@@ -85,7 +85,7 @@
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
class_loader(nullptr) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(class_loader == nullptr);
mirror::Class* c = GetMethod()->GetDeclaringClass();
// c is null for runtime methods.
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index d4e54cf..d89a334 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -44,7 +44,7 @@
ALWAYS_INLINE static inline mirror::Class* DecodeClass(
const ScopedFastNativeObjectAccess& soa, jobject java_class)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
DCHECK(c != nullptr);
DCHECK(c->IsClass());
@@ -111,7 +111,7 @@
static mirror::ObjectArray<mirror::Field>* GetDeclaredFields(
Thread* self, mirror::Class* klass, bool public_only, bool force_resolve)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
IterationRange<StrideIterator<ArtField>> ifields = klass->GetIFields();
IterationRange<StrideIterator<ArtField>> sfields = klass->GetSFields();
@@ -192,7 +192,7 @@
// fast.
ALWAYS_INLINE static inline ArtField* FindFieldByName(
Thread* self ATTRIBUTE_UNUSED, mirror::String* name, LengthPrefixedArray<ArtField>* fields)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (fields == nullptr) {
return nullptr;
}
@@ -237,7 +237,7 @@
ALWAYS_INLINE static inline mirror::Field* GetDeclaredField(
Thread* self, mirror::Class* c, mirror::String* name)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* art_field = FindFieldByName(self, name, c->GetIFieldsPtr());
if (art_field != nullptr) {
return mirror::Field::CreateFromArtField<kRuntimePointerSize>(self, art_field, true);
@@ -251,7 +251,7 @@
static mirror::Field* GetPublicFieldRecursive(
Thread* self, mirror::Class* clazz, mirror::String* name)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(clazz != nullptr);
DCHECK(name != nullptr);
DCHECK(self != nullptr);
@@ -352,7 +352,7 @@
}
static ALWAYS_INLINE inline bool MethodMatchesConstructor(ArtMethod* m, bool public_only)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(m != nullptr);
return (!public_only || m->IsPublic()) && !m->IsStatic() && m->IsConstructor();
}
diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc
index 9e2d68d..1b399aa 100644
--- a/runtime/native/java_lang_System.cc
+++ b/runtime/native/java_lang_System.cc
@@ -36,7 +36,7 @@
*/
static void ThrowArrayStoreException_NotAnArray(const char* identifier, mirror::Object* array)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::string actualType(PrettyTypeOf(array));
Thread* self = Thread::Current();
self->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;",
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index aac800a..5a4ced2 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -32,7 +32,7 @@
template<bool kIsSet>
ALWAYS_INLINE inline static bool VerifyFieldAccess(Thread* self, mirror::Field* field,
mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kIsSet && field->IsFinal()) {
ThrowIllegalAccessException(
StringPrintf("Cannot set %s field %s of class %s",
@@ -60,7 +60,7 @@
template<bool kAllowReferences>
ALWAYS_INLINE inline static bool GetFieldValue(mirror::Object* o, mirror::Field* f,
Primitive::Type field_type, JValue* value)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(value->GetJ(), INT64_C(0));
MemberOffset offset(f->GetOffset());
const bool is_volatile = f->IsVolatile();
@@ -105,7 +105,7 @@
ALWAYS_INLINE inline static bool CheckReceiver(const ScopedFastNativeObjectAccess& soa,
jobject j_rcvr, mirror::Field** f,
mirror::Object** class_or_rcvr)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
soa.Self()->AssertThreadSuspensionIsAllowable();
mirror::Class* declaringClass = (*f)->GetDeclaringClass();
if ((*f)->IsStatic()) {
@@ -232,7 +232,7 @@
ALWAYS_INLINE inline static void SetFieldValue(mirror::Object* o, mirror::Field* f,
Primitive::Type field_type, bool allow_references,
const JValue& new_value)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(f->GetDeclaringClass()->IsInitialized());
MemberOffset offset(f->GetOffset());
const bool is_volatile = f->IsVolatile();
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index 9ce4a02..9ed0e7e 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -23,7 +23,6 @@
#include "scoped_fast_native_object_access.h"
#include "ScopedLocalRef.h"
#include "ScopedPrimitiveArray.h"
-#include "stack.h"
#include "thread_list.h"
namespace art {
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index 858849f..472340c 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -305,7 +305,7 @@
static void copyToArray(jlong srcAddr, mirror::PrimitiveArray<T>* array,
size_t array_offset,
size_t size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const T* src = reinterpret_cast<T*>(srcAddr);
size_t sz = size / sizeof(T);
size_t of = array_offset / sizeof(T);
@@ -318,7 +318,7 @@
static void copyFromArray(jlong dstAddr, mirror::PrimitiveArray<T>* array,
size_t array_offset,
size_t size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
T* dst = reinterpret_cast<T*>(dstAddr);
size_t sz = size / sizeof(T);
size_t of = array_offset / sizeof(T);
diff --git a/runtime/nth_caller_visitor.h b/runtime/nth_caller_visitor.h
index e9b0d3c..f72a853 100644
--- a/runtime/nth_caller_visitor.h
+++ b/runtime/nth_caller_visitor.h
@@ -33,7 +33,7 @@
count(0),
caller(nullptr) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
bool do_count = false;
if (m == nullptr || m->IsRuntimeMethod()) {
diff --git a/runtime/oat.h b/runtime/oat.h
index 7c84fe9..35d0c92 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '0', '8', '6', '\0' };
+ static constexpr uint8_t kOatVersion[] = { '0', '8', '7', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 05c5a22..6ec5e55 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -87,8 +87,10 @@
bool with_patch_info = true) {
// Temporarily redirect the dalvik cache so dex2oat doesn't find the
// relocated image file.
- std::string android_data_tmp = GetScratchDir() + "AndroidDataTmp";
- setenv("ANDROID_DATA", android_data_tmp.c_str(), 1);
+ std::string dalvik_cache = GetDalvikCache(GetInstructionSetString(kRuntimeISA));
+ std::string dalvik_cache_tmp = dalvik_cache + ".redirected";
+ ASSERT_EQ(0, rename(dalvik_cache.c_str(), dalvik_cache_tmp.c_str())) << strerror(errno);
+
std::vector<std::string> args;
args.push_back("--dex-file=" + dex_location);
args.push_back("--oat-file=" + odex_location);
@@ -106,7 +108,7 @@
std::string error_msg;
ASSERT_TRUE(OatFileAssistant::Dex2Oat(args, &error_msg)) << error_msg;
- setenv("ANDROID_DATA", android_data_.c_str(), 1);
+ ASSERT_EQ(0, rename(dalvik_cache_tmp.c_str(), dalvik_cache.c_str())) << strerror(errno);
// Verify the odex file was generated as expected and really is
// unrelocated.
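The test fixture change above stops redirecting ANDROID_DATA and instead renames the real dalvik cache directory aside while dex2oat runs, then renames it back. A hypothetical RAII helper sketching that move-aside/restore pattern (not part of the test; names are illustrative):

#include <cstdio>   // std::rename
#include <string>
#include <cassert>

class ScopedDirRedirect {
 public:
  explicit ScopedDirRedirect(const std::string& dir)
      : dir_(dir), aside_(dir + ".redirected") {
    // Move the real directory out of the way so tools launched below cannot find it.
    int result = std::rename(dir_.c_str(), aside_.c_str());
    assert(result == 0);
    (void)result;
  }
  ~ScopedDirRedirect() {
    // Put the directory back where it was.
    int result = std::rename(aside_.c_str(), dir_.c_str());
    assert(result == 0);
    (void)result;
  }
 private:
  std::string dir_;
  std::string aside_;
};

A scope-bound helper like this restores the directory on any exit path; the test itself uses explicit ASSERT_EQ(0, rename(...)) pairs as shown in the hunk.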
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 2e67ffe..6d4b2f6 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -226,7 +226,7 @@
static void IterateOverJavaDexFile(mirror::Object* dex_file,
ArtField* const cookie_field,
std::function<bool(const DexFile*)> fn)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (dex_file != nullptr) {
mirror::LongArray* long_array = cookie_field->GetObject(dex_file)->AsLongArray();
if (long_array == nullptr) {
@@ -250,7 +250,7 @@
ScopedObjectAccessAlreadyRunnable& soa,
Handle<mirror::ClassLoader> class_loader,
MutableHandle<mirror::ObjectArray<mirror::Object>> dex_elements,
- std::function<bool(const DexFile*)> fn) SHARED_REQUIRES(Locks::mutator_lock_) {
+ std::function<bool(const DexFile*)> fn) REQUIRES_SHARED(Locks::mutator_lock_) {
// Handle this step.
// Handle as if this is the child PathClassLoader.
// The class loader is a PathClassLoader which inherits from BaseDexClassLoader.
@@ -286,7 +286,7 @@
static bool GetDexFilesFromClassLoader(
ScopedObjectAccessAlreadyRunnable& soa,
mirror::ClassLoader* class_loader,
- std::priority_queue<DexFileAndClassPair>* queue) SHARED_REQUIRES(Locks::mutator_lock_) {
+ std::priority_queue<DexFileAndClassPair>* queue) REQUIRES_SHARED(Locks::mutator_lock_) {
if (ClassLinker::IsBootClassLoader(soa, class_loader)) {
// The boot class loader. We don't load any of these files, as we know we compiled against
// them correctly.
@@ -308,7 +308,7 @@
// Collect all the dex files.
auto GetDexFilesFn = [&] (const DexFile* cp_dex_file)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (cp_dex_file->NumClassDefs() > 0) {
queue->emplace(cp_dex_file, 0U, true);
}
@@ -329,7 +329,7 @@
static void GetDexFilesFromDexElementsArray(
ScopedObjectAccessAlreadyRunnable& soa,
Handle<mirror::ObjectArray<mirror::Object>> dex_elements,
- std::priority_queue<DexFileAndClassPair>* queue) SHARED_REQUIRES(Locks::mutator_lock_) {
+ std::priority_queue<DexFileAndClassPair>* queue) REQUIRES_SHARED(Locks::mutator_lock_) {
if (dex_elements.Get() == nullptr) {
// Nothing to do.
return;
@@ -345,7 +345,7 @@
// Collect all the dex files.
auto GetDexFilesFn = [&] (const DexFile* cp_dex_file)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (cp_dex_file != nullptr && cp_dex_file->NumClassDefs() > 0) {
queue->emplace(cp_dex_file, 0U, true);
}
diff --git a/runtime/object_lock.h b/runtime/object_lock.h
index 7f02b37..5916f90 100644
--- a/runtime/object_lock.h
+++ b/runtime/object_lock.h
@@ -28,15 +28,15 @@
template <typename T>
class ObjectLock {
public:
- ObjectLock(Thread* self, Handle<T> object) SHARED_REQUIRES(Locks::mutator_lock_);
+ ObjectLock(Thread* self, Handle<T> object) REQUIRES_SHARED(Locks::mutator_lock_);
- ~ObjectLock() SHARED_REQUIRES(Locks::mutator_lock_);
+ ~ObjectLock() REQUIRES_SHARED(Locks::mutator_lock_);
- void WaitIgnoringInterrupts() SHARED_REQUIRES(Locks::mutator_lock_);
+ void WaitIgnoringInterrupts() REQUIRES_SHARED(Locks::mutator_lock_);
- void Notify() SHARED_REQUIRES(Locks::mutator_lock_);
+ void Notify() REQUIRES_SHARED(Locks::mutator_lock_);
- void NotifyAll() SHARED_REQUIRES(Locks::mutator_lock_);
+ void NotifyAll() REQUIRES_SHARED(Locks::mutator_lock_);
private:
Thread* const self_;
@@ -48,9 +48,9 @@
template <typename T>
class ObjectTryLock {
public:
- ObjectTryLock(Thread* self, Handle<T> object) SHARED_REQUIRES(Locks::mutator_lock_);
+ ObjectTryLock(Thread* self, Handle<T> object) REQUIRES_SHARED(Locks::mutator_lock_);
- ~ObjectTryLock() SHARED_REQUIRES(Locks::mutator_lock_);
+ ~ObjectTryLock() REQUIRES_SHARED(Locks::mutator_lock_);
bool Acquired() const {
return acquired_;
diff --git a/runtime/openjdkjvm/Android.bp b/runtime/openjdkjvm/Android.bp
index 3e8dc8c..5ed1615 100644
--- a/runtime/openjdkjvm/Android.bp
+++ b/runtime/openjdkjvm/Android.bp
@@ -31,8 +31,8 @@
art_cc_library {
name: "libopenjdkjvmd",
defaults: [
- "libopenjdkjvm_defaults",
"art_debug_defaults",
+ "libopenjdkjvm_defaults",
],
shared_libs: ["libartd"],
}
diff --git a/runtime/openjdkjvmti/Android.bp b/runtime/openjdkjvmti/Android.bp
index 4430248..977ef44 100644
--- a/runtime/openjdkjvmti/Android.bp
+++ b/runtime/openjdkjvmti/Android.bp
@@ -31,8 +31,8 @@
art_cc_library {
name: "libopenjdkjvmtid",
defaults: [
- "libopenjdkjvmti_defaults",
"art_debug_defaults",
+ "libopenjdkjvmti_defaults",
],
shared_libs: ["libartd"],
}
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index 82e57b4..e3f92c7 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -35,7 +35,7 @@
mirror::Class* GenerateProxyClass(ScopedObjectAccess& soa, jobject jclass_loader,
const char* className,
const std::vector<mirror::Class*>& interfaces)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Class* javaLangObject = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
CHECK(javaLangObject != nullptr);
diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc
index a6e3693..dc6f4eb 100644
--- a/runtime/quick/inline_method_analyser.cc
+++ b/runtime/quick/inline_method_analyser.cc
@@ -141,7 +141,7 @@
// Used for a single invoke in a constructor. In that situation, the method verifier makes
// sure we invoke a constructor either in the same class or superclass with at least "this".
ArtMethod* GetTargetConstructor(ArtMethod* method, const Instruction* invoke_direct)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(invoke_direct->Opcode(), Instruction::INVOKE_DIRECT);
DCHECK_EQ(invoke_direct->VRegC_35c(),
method->GetCodeItem()->registers_size_ - method->GetCodeItem()->ins_size_);
@@ -212,7 +212,7 @@
uint16_t this_vreg,
uint16_t zero_vreg_mask,
/*inout*/ ConstructorIPutData (&iputs)[kMaxConstructorIPuts])
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsInstructionIPut(new_iput->Opcode()));
uint32_t field_index = new_iput->VRegC_22c();
PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
@@ -253,7 +253,7 @@
bool DoAnalyseConstructor(const DexFile::CodeItem* code_item,
ArtMethod* method,
/*inout*/ ConstructorIPutData (&iputs)[kMaxConstructorIPuts])
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// On entry we should not have any IPUTs yet.
DCHECK_EQ(0, std::count_if(
iputs,
@@ -367,7 +367,7 @@
bool AnalyseConstructor(const DexFile::CodeItem* code_item,
ArtMethod* method,
InlineMethod* result)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ConstructorIPutData iputs[kMaxConstructorIPuts];
if (!DoAnalyseConstructor(code_item, method, iputs)) {
return false;
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
index 0e12d73..356e290 100644
--- a/runtime/quick/inline_method_analyser.h
+++ b/runtime/quick/inline_method_analyser.h
@@ -217,9 +217,9 @@
* @return true if the method is a candidate for inlining, false otherwise.
*/
static bool AnalyseMethodCode(verifier::MethodVerifier* verifier, InlineMethod* result)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool AnalyseMethodCode(ArtMethod* method, InlineMethod* result)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static constexpr bool IsInstructionIGet(Instruction::Code opcode) {
return Instruction::IGET <= opcode && opcode <= Instruction::IGET_SHORT;
@@ -246,7 +246,7 @@
bool is_static,
ArtMethod* method,
InlineMethod* result)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool AnalyseReturnMethod(const DexFile::CodeItem* code_item, InlineMethod* result);
static bool AnalyseConstMethod(const DexFile::CodeItem* code_item, InlineMethod* result);
static bool AnalyseIGetMethod(const DexFile::CodeItem* code_item,
@@ -254,13 +254,13 @@
bool is_static,
ArtMethod* method,
InlineMethod* result)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool AnalyseIPutMethod(const DexFile::CodeItem* code_item,
const MethodReference& method_ref,
bool is_static,
ArtMethod* method,
InlineMethod* result)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can we fast path instance field access in a verified accessor?
// If yes, computes field's offset and volatility and whether the method is static or not.
@@ -268,7 +268,7 @@
uint32_t field_idx,
bool is_put,
InlineIGetIPutData* result)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
};
} // namespace art
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 46d9e7f..55aba2b 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -30,6 +30,7 @@
#include "mirror/class_loader.h"
#include "mirror/throwable.h"
#include "oat_quick_method_header.h"
+#include "stack.h"
#include "stack_map.h"
#include "verifier/method_verifier.h"
@@ -59,13 +60,13 @@
public:
CatchBlockStackVisitor(Thread* self, Context* context, Handle<mirror::Throwable>* exception,
QuickExceptionHandler* exception_handler)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
exception_(exception),
exception_handler_(exception_handler) {
}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = GetMethod();
exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
if (method == nullptr) {
@@ -97,7 +98,7 @@
private:
bool HandleTryItems(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t dex_pc = DexFile::kDexNoIndex;
if (!method->IsNative()) {
dex_pc = GetDexPc();
@@ -284,7 +285,7 @@
Context* context,
QuickExceptionHandler* exception_handler,
bool single_frame)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
exception_handler_(exception_handler),
prev_shadow_frame_(nullptr),
@@ -304,7 +305,7 @@
return single_frame_deopt_quick_method_header_;
}
- void FinishStackWalk() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void FinishStackWalk() REQUIRES_SHARED(Locks::mutator_lock_) {
// This is the upcall, or the next full frame in single-frame deopt, or the
// code isn't deoptimizeable. We remember the frame and last pc so that we
// may long jump to them.
@@ -327,7 +328,7 @@
}
}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
ArtMethod* method = GetMethod();
if (method == nullptr || single_frame_done_) {
@@ -396,7 +397,7 @@
void HandleOptimizingDeoptimization(ArtMethod* m,
ShadowFrame* new_frame,
const bool* updated_vregs)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
CodeInfo code_info = method_header->GetOptimizedCodeInfo();
uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
@@ -577,14 +578,14 @@
class InstrumentationStackVisitor : public StackVisitor {
public:
InstrumentationStackVisitor(Thread* self, size_t frame_depth)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
frame_depth_(frame_depth),
instrumentation_frames_to_pop_(0) {
CHECK_NE(frame_depth_, kInvalidFrameDepth);
}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
size_t current_frame_depth = GetFrameDepth();
if (current_frame_depth < frame_depth_) {
CHECK(GetMethod() != nullptr);
@@ -647,11 +648,11 @@
class DumpFramesWithTypeStackVisitor FINAL : public StackVisitor {
public:
explicit DumpFramesWithTypeStackVisitor(Thread* self, bool show_details = false)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
show_details_(show_details) {}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = GetMethod();
if (show_details_) {
LOG(INFO) << "|> pc = " << std::hex << GetCurrentQuickFramePc();
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index 74b7d0d..5592126 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -20,7 +20,7 @@
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex.h"
-#include "stack.h" // StackReference
+#include "stack_reference.h"
namespace art {
@@ -29,14 +29,16 @@
} // namespace mirror
class ArtMethod;
class Context;
+class OatQuickMethodHeader;
class Thread;
class ShadowFrame;
+class StackVisitor;
// Manages exception delivery for Quick backend.
class QuickExceptionHandler {
public:
QuickExceptionHandler(Thread* self, bool is_deoptimization)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
NO_RETURN ~QuickExceptionHandler() {
LOG(FATAL) << "UNREACHABLE"; // Expected to take long jump.
@@ -44,12 +46,12 @@
}
// Find the catch handler for the given exception.
- void FindCatch(mirror::Throwable* exception) SHARED_REQUIRES(Locks::mutator_lock_);
+ void FindCatch(mirror::Throwable* exception) REQUIRES_SHARED(Locks::mutator_lock_);
// Deoptimize the stack to the upcall/some code that's not deoptimizeable. For
// every compiled frame, we create a "copy" shadow frame that will be executed
// with the interpreter.
- void DeoptimizeStack() SHARED_REQUIRES(Locks::mutator_lock_);
+ void DeoptimizeStack() REQUIRES_SHARED(Locks::mutator_lock_);
// Deoptimize a single frame. It's directly triggered from compiled code. It
// has the following properties:
@@ -60,22 +62,22 @@
// the result of IsDeoptimizeable().
// - It can be either full-fragment, or partial-fragment deoptimization, depending
// on whether that single frame covers full or partial fragment.
- void DeoptimizeSingleFrame() SHARED_REQUIRES(Locks::mutator_lock_);
+ void DeoptimizeSingleFrame() REQUIRES_SHARED(Locks::mutator_lock_);
void DeoptimizePartialFragmentFixup(uintptr_t return_pc)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Update the instrumentation stack by removing all methods that will be unwound
// by the exception being thrown.
// Return the return pc of the last frame that's unwound.
- uintptr_t UpdateInstrumentationStack() SHARED_REQUIRES(Locks::mutator_lock_);
+ uintptr_t UpdateInstrumentationStack() REQUIRES_SHARED(Locks::mutator_lock_);
// Set up environment before delivering an exception to optimized code.
void SetCatchEnvironmentForOptimizedHandler(StackVisitor* stack_visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Long jump either to a catch handler or to the upcall.
- NO_RETURN void DoLongJump(bool smash_caller_saves = true) SHARED_REQUIRES(Locks::mutator_lock_);
+ NO_RETURN void DoLongJump(bool smash_caller_saves = true) REQUIRES_SHARED(Locks::mutator_lock_);
void SetHandlerQuickFrame(ArtMethod** handler_quick_frame) {
handler_quick_frame_ = handler_quick_frame;
@@ -128,7 +130,7 @@
// Walk the stack frames of the given thread, printing out non-runtime methods with their types
// of frames. Helps to verify that partial-fragment deopt really works as expected.
static void DumpFramesWithType(Thread* self, bool details = false)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
Thread* const self_;
@@ -159,7 +161,7 @@
bool full_fragment_done_;
void PrepareForLongJumpToInvokeStubOrInterpreterBridge()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
DISALLOW_COPY_AND_ASSIGN(QuickExceptionHandler);
};
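The quick_exception_handler.h hunk above also trims its include graph: the heavy stack.h include becomes stack_reference.h, and OatQuickMethodHeader / StackVisitor become forward declarations because the header only uses them by pointer. A generic sketch of that dependency-trimming pattern (illustrative class name, not the real header):

// some_handler.h: forward declarations suffice for pointer and reference uses.
class StackVisitor;
class OatQuickMethodHeader;

class SomeHandler {
 public:
  // Only the pointer type is needed here, so no #include of the full definition.
  void SetCatchEnvironmentFor(StackVisitor* stack_visitor);
  const OatQuickMethodHeader* handler_method_header() const { return header_; }

 private:
  const OatQuickMethodHeader* header_ = nullptr;
};

// some_handler.cc would include the real headers where the complete types are
// actually dereferenced.

Forward declarations keep rebuilds smaller and avoid include cycles; correspondingly, quick_exception_handler.cc now pulls in stack.h itself (see the added #include "stack.h" hunk above).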
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index 5d32c09..a861861 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -48,39 +48,39 @@
bool kAlwaysUpdateField = false>
ALWAYS_INLINE static MirrorType* Barrier(
mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// It's up to the implementation whether the given root gets updated
// whereas the return value must be an updated reference.
template <typename MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE static MirrorType* BarrierForRoot(MirrorType** root,
GcRootSource* gc_root_source = nullptr)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// It's up to the implementation whether the given root gets updated
// whereas the return value must be an updated reference.
template <typename MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE static MirrorType* BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
GcRootSource* gc_root_source = nullptr)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool IsDuringStartup();
// Without the holder object.
static void AssertToSpaceInvariant(mirror::Object* ref)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
}
// With the holder object.
static void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
mirror::Object* ref)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// With GcRootSource.
static void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// ALWAYS_INLINE on this caused a performance regression b/26744236.
- static mirror::Object* Mark(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ static mirror::Object* Mark(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
static mirror::Object* WhitePtr() {
return reinterpret_cast<mirror::Object*>(white_ptr_);
@@ -94,7 +94,7 @@
ALWAYS_INLINE static bool HasGrayReadBarrierPointer(mirror::Object* obj,
uintptr_t* out_rb_ptr_high_bits)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Note: These couldn't be constexpr pointers as reinterpret_cast isn't compatible with them.
static constexpr uintptr_t white_ptr_ = 0x0; // Not marked.
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index 49b6a38..f04d41d 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -62,7 +62,7 @@
// If "obj" is an array, return the number of elements in the array.
// Otherwise, return zero.
-static size_t GetElementCount(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+static size_t GetElementCount(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
// We assume the special cleared value isn't an array in the if statement below.
DCHECK(!Runtime::Current()->GetClearedJniWeakGlobal()->IsArrayInstance());
if (obj == nullptr || !obj->IsArrayInstance()) {
@@ -78,7 +78,7 @@
// or equivalent to the original.
static void DumpSummaryLine(std::ostream& os, mirror::Object* obj, size_t element_count,
int identical, int equiv)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (obj == nullptr) {
os << " null reference (count=" << equiv << ")\n";
return;
diff --git a/runtime/reference_table.h b/runtime/reference_table.h
index f90ccd1..992ded0 100644
--- a/runtime/reference_table.h
+++ b/runtime/reference_table.h
@@ -41,22 +41,22 @@
ReferenceTable(const char* name, size_t initial_size, size_t max_size);
~ReferenceTable();
- void Add(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ void Add(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
- void Remove(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ void Remove(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
size_t Size() const;
- void Dump(std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_);
+ void Dump(std::ostream& os) REQUIRES_SHARED(Locks::mutator_lock_);
void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
typedef std::vector<GcRoot<mirror::Object>,
TrackingAllocator<GcRoot<mirror::Object>, kAllocatorTagReferenceTable>> Table;
static void Dump(std::ostream& os, Table& entries)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
friend class IndirectReferenceTable; // For Dump.
std::string name_;
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 8a531d9..f2af3da 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -29,7 +29,7 @@
#include "mirror/object_array-inl.h"
#include "nth_caller_visitor.h"
#include "scoped_thread_state_change.h"
-#include "stack.h"
+#include "stack_reference.h"
#include "well_known_classes.h"
namespace art {
@@ -72,7 +72,7 @@
num_bytes_ += 4;
}
- void Append(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Append(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
Append(StackReference<mirror::Object>::FromMirrorPtr(obj).AsVRegValue());
}
@@ -96,7 +96,7 @@
void BuildArgArrayFromVarArgs(const ScopedObjectAccessAlreadyRunnable& soa,
mirror::Object* receiver, va_list ap)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Set receiver if non-null (method is not static)
if (receiver != nullptr) {
Append(receiver);
@@ -132,7 +132,7 @@
void BuildArgArrayFromJValues(const ScopedObjectAccessAlreadyRunnable& soa,
mirror::Object* receiver, jvalue* args)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Set receiver if non-null (method is not static)
if (receiver != nullptr) {
Append(receiver);
@@ -171,7 +171,7 @@
}
void BuildArgArrayFromFrame(ShadowFrame* shadow_frame, uint32_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Set receiver if non-null (method is not static)
size_t cur_arg = arg_offset;
if (!shadow_frame->GetMethod()->IsStatic()) {
@@ -206,7 +206,7 @@
static void ThrowIllegalPrimitiveArgumentException(const char* expected,
const char* found_descriptor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ThrowIllegalArgumentException(
StringPrintf("Invalid primitive conversion from %s to %s", expected,
PrettyDescriptor(found_descriptor).c_str()).c_str());
@@ -214,7 +214,7 @@
bool BuildArgArrayFromObjectArray(mirror::Object* receiver,
mirror::ObjectArray<mirror::Object>* args, ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile::TypeList* classes = m->GetParameterTypeList();
// Set receiver if non-null (method is not static)
if (receiver != nullptr) {
@@ -346,7 +346,7 @@
};
static void CheckMethodArguments(JavaVMExt* vm, ArtMethod* m, uint32_t* args)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile::TypeList* params = m->GetParameterTypeList();
if (params == nullptr) {
return; // No arguments so nothing to check.
@@ -424,7 +424,7 @@
}
static ArtMethod* FindVirtualMethod(mirror::Object* receiver, ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(method, kRuntimePointerSize);
}
@@ -432,7 +432,7 @@
static void InvokeWithArgArray(const ScopedObjectAccessAlreadyRunnable& soa,
ArtMethod* method, ArgArray* arg_array, JValue* result,
const char* shorty)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t* args = arg_array->GetArray();
if (UNLIKELY(soa.Env()->check_jni)) {
CheckMethodArguments(soa.Vm(), method->GetInterfaceMethodIfProxy(kRuntimePointerSize), args);
@@ -442,7 +442,7 @@
JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid,
va_list args)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// We want to make sure that the stack is not within a small distance from the
// protected region in case we are calling into a leaf function whose stack
// check has been elided.
@@ -740,7 +740,7 @@
}
static std::string UnboxingFailureKind(ArtField* f)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (f != nullptr) {
return "field " + PrettyField(f, false);
}
@@ -750,7 +750,7 @@
static bool UnboxPrimitive(mirror::Object* o,
mirror::Class* dst_class, ArtField* f,
JValue* unboxed_value)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
bool unbox_for_result = (f == nullptr);
if (!dst_class->IsPrimitive()) {
if (UNLIKELY(o != nullptr && !o->InstanceOf(dst_class))) {
diff --git a/runtime/reflection.h b/runtime/reflection.h
index d9c38c1..579c6b1 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -33,60 +33,60 @@
class ShadowFrame;
mirror::Object* BoxPrimitive(Primitive::Type src_class, const JValue& value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, ArtField* f,
JValue* unboxed_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool UnboxPrimitiveForResult(mirror::Object* o, mirror::Class* dst_class, JValue* unboxed_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE bool ConvertPrimitiveValue(bool unbox_for_result,
Primitive::Type src_class, Primitive::Type dst_class,
const JValue& src, JValue* dst)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid,
va_list args)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
JValue InvokeWithJValues(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid,
jvalue* args)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccessAlreadyRunnable& soa,
jobject obj, jmethodID mid, jvalue* args)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa,
jobject obj, jmethodID mid, va_list args)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// num_frames is the number of frames we look up for the access check.
jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject method, jobject receiver,
jobject args, size_t num_frames = 1)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE bool VerifyObjectIsClass(mirror::Object* o, mirror::Class* c)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool VerifyAccess(Thread* self, mirror::Object* obj, mirror::Class* declaring_class,
uint32_t access_flags, mirror::Class** calling_class, size_t num_frames)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// This version takes a known calling class.
bool VerifyAccess(Thread* self, mirror::Object* obj, mirror::Class* declaring_class,
uint32_t access_flags, mirror::Class* calling_class)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get the calling class by using a stack visitor; may return null for unattached native threads.
mirror::Class* GetCallingClass(Thread* self, size_t num_frames)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void InvalidReceiverError(mirror::Object* o, mirror::Class* c)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void UpdateReference(Thread* self, jobject obj, mirror::Object* result)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
} // namespace art
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index 016f3c7..4876ff0 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -86,7 +86,7 @@
mirror::Object** receiver,
bool is_static, const char* method_name,
const char* method_signature)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const char* class_name = is_static ? "StaticLeafMethods" : "NonStaticLeafMethods";
jobject jclass_loader(LoadDex(class_name));
Thread* self = Thread::Current();
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index 3245ba0..2eb0bf7 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -69,13 +69,13 @@
}
inline ArtMethod* Runtime::GetCalleeSaveMethod(CalleeSaveType type)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(HasCalleeSaveMethod(type));
return GetCalleeSaveMethodUnchecked(type);
}
inline ArtMethod* Runtime::GetCalleeSaveMethodUnchecked(CalleeSaveType type)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return reinterpret_cast<ArtMethod*>(callee_save_methods_[type]);
}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index ddcfb6d..ba12d33 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -26,6 +26,9 @@
#include <signal.h>
#include <sys/syscall.h>
#include "base/memory_tool.h"
+#if defined(__APPLE__)
+#include <crt_externs.h> // for _NSGetEnviron
+#endif
#include <cstdio>
#include <cstdlib>
@@ -87,6 +90,7 @@
#include "mirror/throwable.h"
#include "monitor.h"
#include "native/dalvik_system_DexFile.h"
+#include "native/dalvik_system_InMemoryDexClassLoader_DexData.h"
#include "native/dalvik_system_VMDebug.h"
#include "native/dalvik_system_VMRuntime.h"
#include "native/dalvik_system_VMStack.h"
@@ -156,6 +160,22 @@
size_t trace_file_size;
};
+namespace {
+#ifdef __APPLE__
+inline char** GetEnviron() {
+ // When Google Test is built as a framework on MacOS X, the environ variable
+ // is unavailable. Apple's documentation (man environ) recommends using
+ // _NSGetEnviron() instead.
+ return *_NSGetEnviron();
+}
+#else
+// Some POSIX platforms expect you to declare environ. extern "C" makes
+// it reside in the global namespace.
+extern "C" char** environ;
+inline char** GetEnviron() { return environ; }
+#endif
+} // namespace
+
Runtime::Runtime()
: resolution_method_(nullptr),
imt_conflict_method_(nullptr),
@@ -904,6 +924,10 @@
}
bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
+ // (b/30160149): protect subprocesses from modifications to LD_LIBRARY_PATH, etc.
+ // Take a snapshot of the environment at the time the runtime was created, for use by Exec, etc.
+ env_snapshot_.TakeSnapshot();
+
RuntimeArgumentMap runtime_options(std::move(runtime_options_in));
ScopedTrace trace(__FUNCTION__);
CHECK_EQ(sysconf(_SC_PAGE_SIZE), kPageSize);
@@ -1366,6 +1390,7 @@
void Runtime::RegisterRuntimeNativeMethods(JNIEnv* env) {
register_dalvik_system_DexFile(env);
+ register_dalvik_system_InMemoryDexClassLoader_DexData(env);
register_dalvik_system_VMDebug(env);
register_dalvik_system_VMRuntime(env);
register_dalvik_system_VMStack(env);
@@ -1904,6 +1929,12 @@
preinitialization_transaction_->RecordWeakStringRemoval(s);
}
+void Runtime::RecordResolveString(mirror::DexCache* dex_cache, uint32_t string_idx) const {
+ DCHECK(IsAotCompiler());
+ DCHECK(IsActiveTransaction());
+ preinitialization_transaction_->RecordResolveString(dex_cache, string_idx);
+}
+
void Runtime::SetFaultMessage(const std::string& message) {
MutexLock mu(Thread::Current(), fault_message_lock_);
fault_message_ = message;
@@ -1985,7 +2016,7 @@
}
bool Runtime::IsDeoptimizeable(uintptr_t code) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return !heap_->IsInBootImageOatFile(reinterpret_cast<void *>(code));
}
@@ -2021,4 +2052,22 @@
return (jit_ != nullptr) && jit_->UseJitCompilation();
}
+void Runtime::EnvSnapshot::TakeSnapshot() {
+ char** env = GetEnviron();
+ for (size_t i = 0; env[i] != nullptr; ++i) {
+ name_value_pairs_.emplace_back(new std::string(env[i]));
+ }
+ // The strings in name_value_pairs_ retain ownership of the c_str, but we assign pointers
+ // for quick use by GetSnapshot. This avoids allocation and copying cost at Exec.
+ c_env_vector_.reset(new char*[name_value_pairs_.size() + 1]);
+ for (size_t i = 0; env[i] != nullptr; ++i) {
+ c_env_vector_[i] = const_cast<char*>(name_value_pairs_[i]->c_str());
+ }
+ c_env_vector_[name_value_pairs_.size()] = nullptr;
+}
+
+char** Runtime::EnvSnapshot::GetSnapshot() const {
+ return c_env_vector_.get();
+}
+
} // namespace art
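The EnvSnapshot added above captures environ (or _NSGetEnviron() on Mac) once at Runtime::Init time, and GetSnapshot() later hands out a stable, null-terminated char** for subprocess creation. A hedged sketch of how a caller could consume it; ExecWithSnapshottedEnv is a hypothetical helper, only EnvSnapshot itself comes from this change:

#include <unistd.h>
#include <string>
#include <vector>

// Hypothetical helper: launch a child using the environment captured when the
// runtime was created, so later setenv() calls (e.g. to LD_LIBRARY_PATH) in
// the parent do not leak into the child (cf. b/30160149).
static void ExecWithSnapshottedEnv(const std::vector<std::string>& args, char** env_snapshot) {
  std::vector<char*> argv;
  for (const std::string& arg : args) {
    argv.push_back(const_cast<char*>(arg.c_str()));
  }
  argv.push_back(nullptr);
  // env_snapshot is the null-terminated array returned by Runtime::GetEnvSnapshot().
  execve(argv[0], argv.data(), env_snapshot);
}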
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 6da60f2..dc14c04 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -55,8 +55,9 @@
} // namespace jit
namespace mirror {
- class ClassLoader;
class Array;
+ class ClassLoader;
+ class DexCache;
template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<int8_t> ByteArray;
@@ -290,15 +291,15 @@
}
// Is the given object the special object used to mark a cleared JNI weak global?
- bool IsClearedJniWeakGlobal(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsClearedJniWeakGlobal(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
// Get the special object used to mark a cleared JNI weak global.
- mirror::Object* GetClearedJniWeakGlobal() SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Object* GetClearedJniWeakGlobal() REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::Throwable* GetPreAllocatedOutOfMemoryError() SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Throwable* GetPreAllocatedOutOfMemoryError() REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Throwable* GetPreAllocatedNoClassDefFoundError()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const std::vector<std::string>& GetProperties() const {
return properties_;
@@ -312,33 +313,33 @@
return "2.1.0";
}
- void DisallowNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_);
- void AllowNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_);
- void BroadcastForNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_);
+ void DisallowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
+ void AllowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
+ void BroadcastForNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
// Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If
// clean_dirty is true then dirty roots will be marked as non-dirty after visiting.
void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Visit image roots, only used for hprof since the GC uses the image space mod union table
// instead.
- void VisitImageRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+ void VisitImageRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
// Visit all of the roots we can safely do concurrently.
void VisitConcurrentRoots(RootVisitor* visitor,
VisitRootFlags flags = kVisitRootFlagAllRoots)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Visit all of the non-thread roots; we can do this with mutators unpaused.
void VisitNonThreadRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void VisitTransactionRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Visit all of the thread roots.
- void VisitThreadRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+ void VisitThreadRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
// Flip thread roots from from-space refs to to-space refs.
size_t FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback,
@@ -347,17 +348,17 @@
// Visit all other roots which must be done with mutators suspended.
void VisitNonConcurrentRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Sweep system weaks; a system weak is deleted if the visitor returns null. Otherwise, the
// system weak is updated to be the visitor's returned value.
void SweepSystemWeaks(IsMarkedVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Constant roots are the roots which never change after the runtime is initialized; they only
// need to be visited once per GC cycle.
void VisitConstantRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns a special method that calls into a trampoline for runtime method resolution
ArtMethod* GetResolutionMethod();
@@ -366,9 +367,9 @@
return resolution_method_ != nullptr;
}
- void SetResolutionMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetResolutionMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* CreateResolutionMethod() SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod* CreateResolutionMethod() REQUIRES_SHARED(Locks::mutator_lock_);
// Returns a special method that calls into a trampoline for runtime imt conflicts.
ArtMethod* GetImtConflictMethod();
@@ -379,11 +380,11 @@
}
void FixupConflictTables();
- void SetImtConflictMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
- void SetImtUnimplementedMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetImtConflictMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetImtUnimplementedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* CreateImtConflictMethod(LinearAlloc* linear_alloc)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns a special method that describes all callee saves being spilled to the stack.
enum CalleeSaveType {
@@ -399,17 +400,17 @@
}
ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const {
return callee_save_method_frame_infos_[type];
}
QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
return OFFSETOF_MEMBER(Runtime, callee_save_methods_[type]);
@@ -423,7 +424,7 @@
void SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type);
- ArtMethod* CreateCalleeSaveMethod() SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod* CreateCalleeSaveMethod() REQUIRES_SHARED(Locks::mutator_lock_);
int32_t GetStat(int kind);
@@ -480,9 +481,9 @@
bool IsTransactionAborted() const;
void AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ThrowTransactionAbortError(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
bool is_volatile) const;
@@ -499,7 +500,7 @@
void RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
mirror::Object* value, bool is_volatile) const;
void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void RecordStrongStringInsertion(mirror::String* s) const
REQUIRES(Locks::intern_table_lock_);
void RecordWeakStringInsertion(mirror::String* s) const
@@ -508,6 +509,8 @@
REQUIRES(Locks::intern_table_lock_);
void RecordWeakStringRemoval(mirror::String* s) const
REQUIRES(Locks::intern_table_lock_);
+ void RecordResolveString(mirror::DexCache* dex_cache, uint32_t string_idx) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetFaultMessage(const std::string& message) REQUIRES(!fault_message_lock_);
// Only read by the signal handler, NO_THREAD_SAFETY_ANALYSIS to prevent lock order violations
@@ -592,7 +595,7 @@
}
// Called from class linker.
- void SetSentinel(mirror::Object* sentinel) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetSentinel(mirror::Object* sentinel) REQUIRES_SHARED(Locks::mutator_lock_);
// Create a normal LinearAlloc, or a low-4GB version if we are a 64-bit AOT compiler.
LinearAlloc* CreateLinearAlloc();
@@ -640,7 +643,13 @@
// Returns if the code can be deoptimized. Code may be compiled with some
// optimization that makes it impossible to deoptimize.
- bool IsDeoptimizeable(uintptr_t code) const SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsDeoptimizeable(uintptr_t code) const REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Returns a saved copy of the environment (getenv/setenv values).
+ // Used by Fork to protect against overwriting LD_LIBRARY_PATH, etc.
+ char** GetEnvSnapshot() const {
+ return env_snapshot_.GetSnapshot();
+ }
private:
static void InitPlatformSignalHandlers();
@@ -864,6 +873,20 @@
// Whether zygote code is in a section that should not start threads.
bool zygote_no_threads_;
+ // Saved environment.
+ class EnvSnapshot {
+ public:
+ EnvSnapshot() = default;
+ void TakeSnapshot();
+ char** GetSnapshot() const;
+
+ private:
+ std::unique_ptr<char*[]> c_env_vector_;
+ std::vector<std::unique_ptr<std::string>> name_value_pairs_;
+
+ DISALLOW_COPY_AND_ASSIGN(EnvSnapshot);
+ } env_snapshot_;
+
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
std::ostream& operator<<(std::ostream& os, const Runtime::CalleeSaveType& rhs);
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index d1cc09a..8a1aca5 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -130,7 +130,7 @@
* it's best if we don't grab a mutex.
*/
template<typename T>
- T AddLocalReference(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ T AddLocalReference(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
DCHECK_NE(obj, Runtime::Current()->GetClearedJniWeakGlobal());
@@ -139,32 +139,32 @@
template<typename T>
T Decode(jobject obj) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
return down_cast<T>(Self()->DecodeJObject(obj));
}
ArtField* DecodeField(jfieldID fid) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
return reinterpret_cast<ArtField*>(fid);
}
- jfieldID EncodeField(ArtField* field) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ jfieldID EncodeField(ArtField* field) const REQUIRES_SHARED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
return reinterpret_cast<jfieldID>(field);
}
- ArtMethod* DecodeMethod(jmethodID mid) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtMethod* DecodeMethod(jmethodID mid) const REQUIRES_SHARED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
return reinterpret_cast<ArtMethod*>(mid);
}
- jmethodID EncodeMethod(ArtMethod* method) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ jmethodID EncodeMethod(ArtMethod* method) const REQUIRES_SHARED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
return reinterpret_cast<jmethodID>(method);
diff --git a/runtime/simulator/Android.bp b/runtime/simulator/Android.bp
index 05f44e3..49322fc 100644
--- a/runtime/simulator/Android.bp
+++ b/runtime/simulator/Android.bp
@@ -16,6 +16,9 @@
cc_defaults {
name: "libart_simulator_defaults",
+ host_supported: true,
+ device_supported: false,
+
defaults: ["art_defaults"],
srcs: [
"code_simulator.cc",
@@ -29,7 +32,7 @@
include_dirs: ["art/runtime"],
}
-cc_library_host_shared {
+art_cc_library {
name: "libart-simulator",
defaults: ["libart_simulator_defaults"],
shared_libs: [
@@ -38,11 +41,11 @@
],
}
-cc_library_host_shared {
- name: "libart-simulatord",
+art_cc_library {
+ name: "libartd-simulator",
defaults: [
- "libart_simulator_defaults",
"art_debug_defaults",
+ "libart_simulator_defaults",
],
shared_libs: [
"libartd",
diff --git a/runtime/simulator/code_simulator_arm64.cc b/runtime/simulator/code_simulator_arm64.cc
index 897d4f5..c7ad1fd 100644
--- a/runtime/simulator/code_simulator_arm64.cc
+++ b/runtime/simulator/code_simulator_arm64.cc
@@ -16,6 +16,8 @@
#include "simulator/code_simulator_arm64.h"
+#include "base/logging.h"
+
using namespace vixl::aarch64; // NOLINT(build/namespaces)
namespace art {
diff --git a/runtime/stack.cc b/runtime/stack.cc
index ababf78..4678ac6 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -165,7 +165,7 @@
}
extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* StackVisitor::GetThisObject() const {
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
@@ -319,8 +319,11 @@
bool StackVisitor::GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const {
const bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
- // X86 float registers are 64-bit and the logic below does not apply.
- DCHECK(!is_float || kRuntimeISA != InstructionSet::kX86);
+ if (kRuntimeISA == InstructionSet::kX86 && is_float) {
+ // X86 float registers are 64-bit and each XMM register is provided as two separate
+ // 32-bit registers by the context.
+ reg = (kind == kDoubleHiVReg) ? (2 * reg + 1) : (2 * reg);
+ }
if (!IsAccessibleRegister(reg, is_float)) {
return false;
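The x86 change above replaces a DCHECK with an index remapping: the context exposes each 64-bit XMM register as two consecutive 32-bit slots, so the high half of a double vreg lives at 2 * reg + 1. A tiny illustrative sketch of that mapping; the function name is ours, not from the source:

// Assumption: the 32-bit slots are laid out low half first, as the diff implies.
static uint32_t MapX86FloatVRegToSlot(uint32_t reg, bool is_double_high_half) {
  return is_double_high_half ? (2 * reg + 1) : (2 * reg);
}
// Example: for reg 3, the low 32 bits come from slot 6 and the high 32 bits from slot 7.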
@@ -547,7 +550,7 @@
next_dex_pc_(0) {
}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (found_frame_) {
ArtMethod* method = GetMethod();
if (method != nullptr && !method->IsRuntimeMethod()) {
@@ -580,7 +583,7 @@
explicit DescribeStackVisitor(Thread* thread_in)
: StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
return true;
}
@@ -610,7 +613,7 @@
}
static void AssertPcIsWithinQuickCode(ArtMethod* method, uintptr_t pc)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (method->IsNative() || method->IsRuntimeMethod() || method->IsProxyMethod()) {
return;
}
@@ -708,7 +711,7 @@
// Counts the number of references in the parameter list of the corresponding method.
// Note: This does _not_ include the "this" argument for non-static methods.
static uint32_t GetNumberOfReferenceArgsWithoutReceiver(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t shorty_len;
const char* shorty = method->GetShorty(&shorty_len);
uint32_t refs = 0;
diff --git a/runtime/stack.h b/runtime/stack.h
index 850d2a4..e9ed497 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -25,9 +25,9 @@
#include "base/mutex.h"
#include "dex_file.h"
#include "gc_root.h"
-#include "mirror/object_reference.h"
#include "quick/quick_method_frame_info.h"
#include "read_barrier.h"
+#include "stack_reference.h"
#include "verify_object.h"
namespace art {
@@ -45,6 +45,7 @@
class ShadowFrame;
class StackVisitor;
class Thread;
+union JValue;
// The kind of vreg being accessed in calls to Set/GetVReg.
enum VRegKind {
@@ -61,11 +62,6 @@
};
std::ostream& operator<<(std::ostream& os, const VRegKind& rhs);
-// A reference from the shadow stack to a MirrorType object within the Java heap.
-template<class MirrorType>
-class MANAGED StackReference : public mirror::CompressedReference<MirrorType> {
-};
-
// Forward declaration. Just calls the destructor.
struct ShadowFrameDeleter;
using ShadowFrameAllocaUniquePtr = std::unique_ptr<ShadowFrame, ShadowFrameDeleter>;
@@ -80,21 +76,21 @@
public:
// Add the given object to the list of monitors, that is, objects that have been locked. This
// will not throw (but be skipped if there is an exception pending on entry).
- void AddMonitor(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ void AddMonitor(Thread* self, mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
// Try to remove the given object from the monitor list, indicating an unlock operation.
// This will throw an IllegalMonitorStateException (clearing any already pending exception), in
// case that there wasn't a lock recorded for the object.
void RemoveMonitorOrThrow(Thread* self,
- const mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ const mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
// Check whether all acquired monitors have been released. This will potentially throw an
// IllegalMonitorStateException, clearing any already pending exception. Returns true if the
// check shows that everything is OK wrt/ lock counting, false otherwise.
- bool CheckAllMonitorsReleasedOrThrow(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool CheckAllMonitorsReleasedOrThrow(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
template <typename T, typename... Args>
- void VisitMonitors(T visitor, Args&&... args) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void VisitMonitors(T visitor, Args&&... args) REQUIRES_SHARED(Locks::mutator_lock_) {
if (monitors_ != nullptr) {
// Visitors may change the Object*. Be careful with the foreach loop.
for (mirror::Object*& obj : *monitors_) {
@@ -239,7 +235,7 @@
// If this returns non-null then this does not mean the vreg is currently a reference
// on non-moving collectors. Check that the raw reg with GetVReg is equal to this if not certain.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- mirror::Object* GetVRegReference(size_t i) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Object* GetVRegReference(size_t i) const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_LT(i, NumberOfVRegs());
mirror::Object* ref;
if (HasReferenceArray()) {
@@ -311,7 +307,7 @@
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetVRegReference(size_t i, mirror::Object* val) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetVRegReference(size_t i, mirror::Object* val) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_LT(i, NumberOfVRegs());
if (kVerifyFlags & kVerifyWrites) {
VerifyObject(val);
@@ -326,14 +322,14 @@
}
}
- ArtMethod* GetMethod() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtMethod* GetMethod() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(method_ != nullptr);
return method_;
}
- mirror::Object* GetThisObject() const SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Object* GetThisObject() const REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::Object* GetThisObject(uint16_t num_ins) const SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Object* GetThisObject(uint16_t num_ins) const REQUIRES_SHARED(Locks::mutator_lock_);
bool Contains(StackReference<mirror::Object>* shadow_frame_entry_obj) const {
if (HasReferenceArray()) {
@@ -479,7 +475,7 @@
: RootInfo(kRootJavaFrame, thread_id), stack_visitor_(stack_visitor), vreg_(vreg) {
}
virtual void Describe(std::ostream& os) const OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
const StackVisitor* const stack_visitor_;
@@ -556,7 +552,7 @@
return OFFSETOF_MEMBER(ManagedStack, top_shadow_frame_);
}
- size_t NumJniShadowFrameReferences() const SHARED_REQUIRES(Locks::mutator_lock_);
+ size_t NumJniShadowFrameReferences() const REQUIRES_SHARED(Locks::mutator_lock_);
bool ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const;
@@ -577,25 +573,25 @@
protected:
StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
public:
virtual ~StackVisitor() {}
// Return 'true' if we should continue to visit more frames, 'false' to stop.
- virtual bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ virtual bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
void WalkStack(bool include_transitions = false)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
Thread* GetThread() const {
return thread_;
}
- ArtMethod* GetMethod() const SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod* GetMethod() const REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* GetOuterMethod() const {
return *GetCurrentQuickFrame();
@@ -605,48 +601,48 @@
return cur_shadow_frame_ != nullptr;
}
- uint32_t GetDexPc(bool abort_on_failure = true) const SHARED_REQUIRES(Locks::mutator_lock_);
+ uint32_t GetDexPc(bool abort_on_failure = true) const REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::Object* GetThisObject() const SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Object* GetThisObject() const REQUIRES_SHARED(Locks::mutator_lock_);
- size_t GetNativePcOffset() const SHARED_REQUIRES(Locks::mutator_lock_);
+ size_t GetNativePcOffset() const REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the height of the stack in the managed stack frames, including transitions.
- size_t GetFrameHeight() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t GetFrameHeight() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetNumFrames() - cur_depth_ - 1;
}
// Returns a frame ID for JDWP use, starting from 1.
- size_t GetFrameId() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t GetFrameId() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFrameHeight() + 1;
}
- size_t GetNumFrames() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t GetNumFrames() REQUIRES_SHARED(Locks::mutator_lock_) {
if (num_frames_ == 0) {
num_frames_ = ComputeNumFrames(thread_, walk_kind_);
}
return num_frames_;
}
- size_t GetFrameDepth() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t GetFrameDepth() REQUIRES_SHARED(Locks::mutator_lock_) {
return cur_depth_;
}
// Get the method and dex pc immediately after the one that's currently being visited.
bool GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
uint64_t* val) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Values will be set in debugger shadow frames. Debugger will make sure deoptimization
// is triggered to make the values effective.
bool SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Values will be set in debugger shadow frames. Debugger will make sure deoptimization
// is triggered to make the values effective.
@@ -655,7 +651,7 @@
uint64_t new_value,
VRegKind kind_lo,
VRegKind kind_hi)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
uintptr_t* GetGPRAddress(uint32_t reg) const;
@@ -671,9 +667,9 @@
return reinterpret_cast<uint32_t*>(vreg_addr);
}
- uintptr_t GetReturnPc() const SHARED_REQUIRES(Locks::mutator_lock_);
+ uintptr_t GetReturnPc() const REQUIRES_SHARED(Locks::mutator_lock_);
- void SetReturnPc(uintptr_t new_ret_pc) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetReturnPc(uintptr_t new_ret_pc) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Return sp-relative offset for a Dalvik virtual register, compiler
@@ -763,23 +759,23 @@
return reinterpret_cast<HandleScope*>(reinterpret_cast<uintptr_t>(sp) + pointer_size);
}
- std::string DescribeLocation() const SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string DescribeLocation() const REQUIRES_SHARED(Locks::mutator_lock_);
static size_t ComputeNumFrames(Thread* thread, StackWalkKind walk_kind)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- static void DescribeStack(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void DescribeStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_);
const OatQuickMethodHeader* GetCurrentOatQuickMethodHeader() const {
return cur_oat_quick_method_header_;
}
- QuickMethodFrameInfo GetCurrentQuickFrameInfo() const SHARED_REQUIRES(Locks::mutator_lock_);
+ QuickMethodFrameInfo GetCurrentQuickFrameInfo() const REQUIRES_SHARED(Locks::mutator_lock_);
private:
// Private constructor known in the case that num_frames_ has already been computed.
StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind, size_t num_frames)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsAccessibleRegister(uint32_t reg, bool is_float) const {
return is_float ? IsAccessibleFPR(reg) : IsAccessibleGPR(reg);
@@ -796,25 +792,25 @@
uintptr_t GetFPR(uint32_t reg) const;
bool GetVRegFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind, uint32_t* val) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool GetVRegPairFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
uint64_t* val) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
VRegKind kind_lo, VRegKind kind_hi,
uint64_t* val) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, VRegKind kind_lo,
uint64_t* val) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- void SanityCheckFrame() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void SanityCheckFrame() const REQUIRES_SHARED(Locks::mutator_lock_);
- InlineInfo GetCurrentInlineInfo() const SHARED_REQUIRES(Locks::mutator_lock_);
+ InlineInfo GetCurrentInlineInfo() const REQUIRES_SHARED(Locks::mutator_lock_);
Thread* const thread_;
const StackWalkKind walk_kind_;
diff --git a/runtime/stack_reference.h b/runtime/stack_reference.h
new file mode 100644
index 0000000..3d37b76
--- /dev/null
+++ b/runtime/stack_reference.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_STACK_REFERENCE_H_
+#define ART_RUNTIME_STACK_REFERENCE_H_
+
+#include "base/macros.h"
+#include "mirror/object_reference.h"
+
+namespace art {
+
+// A reference from the shadow stack to a MirrorType object within the Java heap.
+template<class MirrorType>
+class PACKED(4) StackReference : public mirror::CompressedReference<MirrorType> {
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_STACK_REFERENCE_H_
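StackReference was previously declared inline in runtime/stack.h (see the removal above) and is now split into its own header so that files such as runtime/reflection.cc can include just the reference type. A brief sketch of its intended use, mirroring the ArgArray::Append change in runtime/reflection.cc; ToVRegValue is an illustrative wrapper, not part of the change:

// Illustrative only; assumes the ART headers above are available.
static uint32_t ToVRegValue(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
  // Compress the heap pointer into the 32-bit form stored in a shadow-frame vreg slot.
  return StackReference<mirror::Object>::FromMirrorPtr(obj).AsVRegValue();
}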
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 79b9f02..dde3640 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1362,7 +1362,7 @@
struct StackDumpVisitor : public StackVisitor {
StackDumpVisitor(std::ostream& os_in, Thread* thread_in, Context* context, bool can_allocate_in)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread_in, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
os(os_in),
can_allocate(can_allocate_in),
@@ -1377,7 +1377,7 @@
}
}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m->IsRuntimeMethod()) {
return true;
@@ -1425,7 +1425,7 @@
}
static void DumpLockedObject(mirror::Object* o, void* context)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostream& os = *reinterpret_cast<std::ostream*>(context);
os << " - locked ";
if (o == nullptr) {
@@ -1462,7 +1462,7 @@
};
static bool ShouldShowNativeStack(const Thread* thread)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ThreadState state = thread->GetState();
// In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting.
@@ -1975,11 +1975,11 @@
class CountStackDepthVisitor : public StackVisitor {
public:
explicit CountStackDepthVisitor(Thread* thread)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
depth_(0), skip_depth_(0), skipping_(true) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
// We want to skip frames up to and including the exception's constructor.
// Note we also skip the frame if it doesn't have a method (namely the callee
// save frame)
@@ -2025,7 +2025,7 @@
trace_(nullptr),
pointer_size_(Runtime::Current()->GetClassLinker()->GetImagePointerSize()) {}
- bool Init(int depth) SHARED_REQUIRES(Locks::mutator_lock_) ACQUIRE(Roles::uninterruptible_) {
+ bool Init(int depth) REQUIRES_SHARED(Locks::mutator_lock_) ACQUIRE(Roles::uninterruptible_) {
// Allocate method trace as an object array where the first element is a pointer array that
// contains the ArtMethod pointers and dex PCs. The rest of the elements are the declaring
// classes of the ArtMethod pointers.
@@ -2061,7 +2061,7 @@
self_->EndAssertNoThreadSuspension(nullptr);
}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
if (trace_ == nullptr) {
return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError.
}
@@ -2086,7 +2086,7 @@
return true;
}
- mirror::PointerArray* GetTraceMethodsAndPCs() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::PointerArray* GetTraceMethodsAndPCs() const REQUIRES_SHARED(Locks::mutator_lock_) {
return down_cast<mirror::PointerArray*>(trace_->Get(0));
}
@@ -2266,7 +2266,7 @@
}
static mirror::ClassLoader* GetCurrentClassLoader(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = self->GetCurrentMethod(nullptr);
return method != nullptr
? method->GetDeclaringClass()->GetClassLoader()
@@ -2670,13 +2670,13 @@
// so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack.
struct CurrentMethodVisitor FINAL : public StackVisitor {
CurrentMethodVisitor(Thread* thread, Context* context, bool abort_on_error)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
this_object_(nullptr),
method_(nullptr),
dex_pc_(0),
abort_on_error_(abort_on_error) {}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m->IsRuntimeMethod()) {
// Continue if this is a runtime method.
@@ -2716,13 +2716,13 @@
class ReferenceMapVisitor : public StackVisitor {
public:
ReferenceMapVisitor(Thread* thread, Context* context, RootVisitor& visitor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
// We are visiting the references in compiled frames, so we do not need
// to know the inlined frames.
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kSkipInlinedFrames),
visitor_(visitor) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
if (false) {
LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod())
<< StringPrintf("@ PC:%04x", GetDexPc());
@@ -2736,7 +2736,7 @@
return true;
}
- void VisitShadowFrame(ShadowFrame* shadow_frame) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void VisitShadowFrame(ShadowFrame* shadow_frame) REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = shadow_frame->GetMethod();
VisitDeclaringClass(m);
DCHECK(m != nullptr);
@@ -2762,7 +2762,7 @@
// is executing. We need to ensure that the code stays mapped. NO_THREAD_SAFETY_ANALYSIS since
// the threads do not all hold the heap bitmap lock for parallel GC.
void VisitDeclaringClass(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
NO_THREAD_SAFETY_ANALYSIS {
mirror::Class* klass = method->GetDeclaringClassUnchecked<kWithoutReadBarrier>();
// klass can be null for runtime methods.
@@ -2798,7 +2798,7 @@
}
}
- void VisitQuickFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void VisitQuickFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
DCHECK(cur_quick_frame != nullptr);
ArtMethod* m = *cur_quick_frame;
@@ -2852,7 +2852,7 @@
RootCallbackVisitor(RootVisitor* visitor, uint32_t tid) : visitor_(visitor), tid_(tid) {}
void operator()(mirror::Object** obj, size_t vreg, const StackVisitor* stack_visitor) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
visitor_->VisitRoot(obj, JavaFrameRootInfo(tid_, stack_visitor, vreg));
}
@@ -2925,7 +2925,7 @@
class VerifyRootVisitor : public SingleRootVisitor {
public:
void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
VerifyObject(root);
}
};
diff --git a/runtime/thread.h b/runtime/thread.h
index 1c2d4ab..d248123 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -166,18 +166,18 @@
static Thread* Current();
// On a runnable thread, check for pending thread suspension request and handle if pending.
- void AllowThreadSuspension() SHARED_REQUIRES(Locks::mutator_lock_);
+ void AllowThreadSuspension() REQUIRES_SHARED(Locks::mutator_lock_);
// Process pending thread suspension request and handle if pending.
- void CheckSuspend() SHARED_REQUIRES(Locks::mutator_lock_);
+ void CheckSuspend() REQUIRES_SHARED(Locks::mutator_lock_);
static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
mirror::Object* thread_peer)
REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Translates 172 to pAllocArrayFromCode and so on.
template<PointerSize size_of_pointers>
@@ -191,17 +191,17 @@
bool dump_native_stack = true,
BacktraceMap* backtrace_map = nullptr) const
REQUIRES(!Locks::thread_suspend_count_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void DumpJavaStack(std::ostream& os) const
REQUIRES(!Locks::thread_suspend_count_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
// case we use 'tid' to identify the thread, and we'll include as much information as we can.
static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
REQUIRES(!Locks::thread_suspend_count_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ThreadState GetState() const {
DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
@@ -248,7 +248,7 @@
// mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
void FullSuspendCheck()
REQUIRES(!Locks::thread_suspend_count_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Transition from non-runnable to runnable state acquiring share on mutator_lock_.
ALWAYS_INLINE ThreadState TransitionFromSuspendedToRunnable()
@@ -297,7 +297,7 @@
size_t NumberOfHeldMutexes() const;
- bool HoldsLock(mirror::Object*) const SHARED_REQUIRES(Locks::mutator_lock_);
+ bool HoldsLock(mirror::Object*) const REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Changes the priority of this thread to match that of the java.lang.Thread object.
@@ -326,19 +326,19 @@
// Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
mirror::String* GetThreadName(const ScopedObjectAccessAlreadyRunnable& ts) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
// allocation, or locking.
void GetThreadName(std::string& name) const;
// Sets the thread's name.
- void SetThreadName(const char* name) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetThreadName(const char* name) REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
uint64_t GetCpuMicroTime() const;
- mirror::Object* GetPeer() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Object* GetPeer() const REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(tlsPtr_.jpeer == nullptr);
return tlsPtr_.opeer;
}
@@ -357,23 +357,23 @@
return tlsPtr_.exception != nullptr;
}
- mirror::Throwable* GetException() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Throwable* GetException() const REQUIRES_SHARED(Locks::mutator_lock_) {
return tlsPtr_.exception;
}
void AssertPendingException() const;
- void AssertPendingOOMException() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void AssertPendingOOMException() const REQUIRES_SHARED(Locks::mutator_lock_);
void AssertNoPendingException() const;
void AssertNoPendingExceptionForNewException(const char* msg) const;
- void SetException(mirror::Throwable* new_exception) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetException(mirror::Throwable* new_exception) REQUIRES_SHARED(Locks::mutator_lock_);
- void ClearException() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void ClearException() REQUIRES_SHARED(Locks::mutator_lock_) {
tlsPtr_.exception = nullptr;
}
// Find catch block and perform long jump to appropriate exception handler.
- NO_RETURN void QuickDeliverException() SHARED_REQUIRES(Locks::mutator_lock_);
+ NO_RETURN void QuickDeliverException() REQUIRES_SHARED(Locks::mutator_lock_);
Context* GetLongJumpContext();
void ReleaseLongJumpContext(Context* context) {
@@ -395,12 +395,12 @@
// Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
// abort the runtime iff abort_on_error is true.
ArtMethod* GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error = true) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns whether the given exception was thrown by the current Java method being executed
// (Note that this includes native Java methods).
bool IsExceptionThrownByCurrentMethod(mirror::Throwable* exception) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetTopOfStack(ArtMethod** top_method) {
tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
@@ -417,23 +417,23 @@
// If 'msg' is null, no detail message is set.
void ThrowNewException(const char* exception_class_descriptor, const char* msg)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// If 'msg' is null, no detail message is set. An exception must be pending, and will be
// used as the new exception's cause.
void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
__attribute__((format(printf, 3, 4)))
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// OutOfMemoryError is special, because we need to pre-allocate an instance.
// Only the GC should call this.
- void ThrowOutOfMemoryError(const char* msg) SHARED_REQUIRES(Locks::mutator_lock_)
+ void ThrowOutOfMemoryError(const char* msg) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
static void Startup();
@@ -446,15 +446,15 @@
}
// Convert a jobject into an Object*.
- mirror::Object* DecodeJObject(jobject obj) const SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Object* DecodeJObject(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
// Checks if the weak global ref has been cleared by the GC without decoding it.
- bool IsJWeakCleared(jweak obj) const SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsJWeakCleared(jweak obj) const REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::Object* GetMonitorEnterObject() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Object* GetMonitorEnterObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
return tlsPtr_.monitor_enter_object;
}
- void SetMonitorEnterObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetMonitorEnterObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
tlsPtr_.monitor_enter_object = obj;
}
@@ -510,7 +510,7 @@
// and space efficient to compute than the StackTraceElement[].
template<bool kTransactionActive>
jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
// StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
@@ -519,15 +519,15 @@
static jobjectArray InternalStackTraceToStackTraceElementArray(
const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
jobjectArray output_array = nullptr, int* stack_depth = nullptr)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool HasDebuggerShadowFrames() const {
return tlsPtr_.frame_id_to_shadow_frame != nullptr;
}
- void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+ void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE void VerifyStack() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE void VerifyStack() REQUIRES_SHARED(Locks::mutator_lock_);
//
// Offsets of various members of native Thread class, used by compiled code.
@@ -555,7 +555,7 @@
}
// Deoptimize the Java stack.
- void DeoptimizeWithDeoptimizationException(JValue* result) SHARED_REQUIRES(Locks::mutator_lock_);
+ void DeoptimizeWithDeoptimizationException(JValue* result) REQUIRES_SHARED(Locks::mutator_lock_);
private:
template<PointerSize pointer_size>
@@ -702,7 +702,7 @@
}
// Set the stack end to the value to be used during a stack overflow.
- void SetStackEndForStackOverflow() SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetStackEndForStackOverflow() REQUIRES_SHARED(Locks::mutator_lock_);
// Set the stack end to the value to be used during regular execution.
void ResetDefaultStackEnd() {
@@ -765,7 +765,7 @@
}
// Number of references allocated in JNI ShadowFrames on this thread.
- size_t NumJniShadowFrameReferences() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t NumJniShadowFrameReferences() const REQUIRES_SHARED(Locks::mutator_lock_) {
return tlsPtr_.managed_stack.NumJniShadowFrameReferences();
}
@@ -773,7 +773,7 @@
size_t NumHandleReferences();
// Number of references allocated in handle scopes & JNI shadow frames on this thread.
- size_t NumStackReferences() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t NumStackReferences() REQUIRES_SHARED(Locks::mutator_lock_) {
return NumHandleReferences() + NumJniShadowFrameReferences();
}
@@ -781,7 +781,7 @@
bool HandleScopeContains(jobject obj) const;
void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
HandleScope* GetTopHandleScope() {
return tlsPtr_.top_handle_scope;
@@ -905,32 +905,32 @@
bool is_reference,
bool from_code,
mirror::Throwable* exception)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void PopDeoptimizationContext(JValue* result, mirror::Throwable** exception, bool* from_code)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void AssertHasDeoptimizationContext()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type);
ShadowFrame* PopStackedShadowFrame(StackedShadowFrameType type, bool must_be_present = true);
// For debugger, find the shadow frame that corresponds to a frame id.
// Or return null if there is none.
ShadowFrame* FindDebuggerShadowFrame(size_t frame_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// For debugger, find the bool array that keeps track of the updated vreg set
// for a frame id.
- bool* GetUpdatedVRegFlags(size_t frame_id) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool* GetUpdatedVRegFlags(size_t frame_id) REQUIRES_SHARED(Locks::mutator_lock_);
// For debugger, find the shadow frame that corresponds to a frame id. If
// one doesn't exist yet, create one and track it in frame_id_to_shadow_frame.
ShadowFrame* FindOrCreateDebuggerShadowFrame(size_t frame_id,
uint32_t num_vregs,
ArtMethod* method,
uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Delete the entry that maps from frame_id to shadow_frame.
void RemoveDebuggerShadowFrameMapping(size_t frame_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
return tlsPtr_.instrumentation_stack;
@@ -1016,7 +1016,7 @@
// Push an object onto the allocation stack.
bool PushOnThreadLocalAllocationStack(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Set the thread local allocation pointers to the given pointers.
void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
@@ -1129,7 +1129,7 @@
template<bool kTransactionActive>
void InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
jobject thread_name, jint thread_priority)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Avoid use, callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and
// Dbg::ManageDeoptimization.
@@ -1148,25 +1148,25 @@
return old_state;
}
- void VerifyStackImpl() SHARED_REQUIRES(Locks::mutator_lock_);
+ void VerifyStackImpl() REQUIRES_SHARED(Locks::mutator_lock_);
- void DumpState(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_);
+ void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
void DumpStack(std::ostream& os,
bool dump_native_stack = true,
BacktraceMap* backtrace_map = nullptr) const
REQUIRES(!Locks::thread_suspend_count_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Out-of-line conveniences for debugging in gdb.
static Thread* CurrentFromGdb(); // Like Thread::Current.
// Like Thread::Dump(std::cerr).
- void DumpFromGdb() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void DumpFromGdb() const REQUIRES_SHARED(Locks::mutator_lock_);
static void* CreateCallback(void* arg);
void HandleUncaughtExceptions(ScopedObjectAccess& soa)
- SHARED_REQUIRES(Locks::mutator_lock_);
- void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void RemoveFromThreadGroup(ScopedObjectAccess& soa) REQUIRES_SHARED(Locks::mutator_lock_);
// Initialize a thread.
//
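The thread.h hunks above, like most of the header changes in this commit, are a mechanical rename of the SHARED_REQUIRES(...) annotation to REQUIRES_SHARED(...), matching the attribute naming used by Clang's -Wthread-safety analysis. Below is a minimal, self-contained sketch of how such a macro is commonly defined and used; this is not ART code, and the Mutex, gLock, and ReadCounter names are hypothetical.

// Sketch only: wraps Clang's shared-capability attributes, no-ops elsewhere.
#if defined(__clang__)
#define CAPABILITY(x)        __attribute__((capability(x)))
#define ACQUIRE_SHARED(...)  __attribute__((acquire_shared_capability(__VA_ARGS__)))
#define RELEASE_SHARED(...)  __attribute__((release_shared_capability(__VA_ARGS__)))
#define REQUIRES_SHARED(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
#else
#define CAPABILITY(x)
#define ACQUIRE_SHARED(...)
#define RELEASE_SHARED(...)
#define REQUIRES_SHARED(...)
#endif

// Hypothetical reader-writer lock used only to drive the analysis.
class CAPABILITY("mutex") Mutex {
 public:
  void AcquireShared() ACQUIRE_SHARED() {}
  void ReleaseShared() RELEASE_SHARED() {}
};

Mutex gLock;
int gCounter = 0;

// -Wthread-safety warns at call sites that do not hold gLock at least shared.
int ReadCounter() REQUIRES_SHARED(gLock) { return gCounter; }

int main() {
  gLock.AcquireShared();
  int value = ReadCounter();  // OK: the shared capability is held here.
  gLock.ReleaseShared();
  return value;
}

The rename is behaviour-neutral; it only aligns the macro name with the REQUIRES(...) spelling already used alongside it in these headers.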
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index 49f65e1..5880085 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -142,11 +142,11 @@
!Locks::thread_suspend_count_lock_);
void VisitRoots(RootVisitor* visitor) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void VisitRootsForSuspendedThreads(RootVisitor* visitor)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Return a copy of the thread list.
std::list<Thread*> GetList() REQUIRES(Locks::thread_list_lock_) {
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 56a26de..23591c2 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -57,7 +57,7 @@
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
method_trace_(Trace::AllocStackTrace()) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
// Ignore runtime frames (in particular callee save).
if (!m->IsRuntimeMethod()) {
@@ -220,7 +220,7 @@
*buf++ = static_cast<uint8_t>(val >> 56);
}
-static void GetSample(Thread* thread, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
+static void GetSample(Thread* thread, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
BuildStackTraceVisitor build_trace_visitor(thread);
build_trace_visitor.WalkStack();
std::vector<ArtMethod*>* stack_trace = build_trace_visitor.GetStackTrace();
@@ -747,7 +747,7 @@
ArtMethod* method,
uint32_t dex_pc,
ArtField* field ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// We're not registered to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected field read event in tracing " << PrettyMethod(method) << " " << dex_pc;
}
@@ -758,7 +758,7 @@
uint32_t dex_pc,
ArtField* field ATTRIBUTE_UNUSED,
const JValue& field_value ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// We're not registered to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected field write event in tracing " << PrettyMethod(method) << " " << dex_pc;
}
@@ -793,13 +793,13 @@
void Trace::ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED,
mirror::Throwable* exception_object ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(ERROR) << "Unexpected exception caught event in tracing";
}
void Trace::Branch(Thread* /*thread*/, ArtMethod* method,
uint32_t /*dex_pc*/, int32_t /*dex_pc_offset*/)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(ERROR) << "Unexpected branch event in tracing" << PrettyMethod(method);
}
diff --git a/runtime/trace.h b/runtime/trace.h
index 9b29fb9..824b150 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -137,43 +137,43 @@
uint32_t GetClockOverheadNanoSeconds();
void CompareAndUpdateStackTrace(Thread* thread, std::vector<ArtMethod*>* stack_trace)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);
// InstrumentationListener implementation.
void MethodEntered(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
OVERRIDE;
void MethodExited(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc,
const JValue& return_value)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
OVERRIDE;
void MethodUnwind(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
OVERRIDE;
void DexPcMoved(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t new_dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
OVERRIDE;
void FieldRead(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc, ArtField* field)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
void FieldWritten(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc, ArtField* field,
const JValue& field_value)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
void ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
void Branch(Thread* thread, ArtMethod* method, uint32_t dex_pc, int32_t dex_pc_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
void InvokeVirtualOrInterface(Thread* thread,
mirror::Object* this_object,
ArtMethod* caller,
uint32_t dex_pc,
ArtMethod* callee)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
// Reuse an old stack trace if it exists, otherwise allocate a new one.
static std::vector<ArtMethod*>* AllocStackTrace();
// Clear and store an old stack trace for later use.
@@ -202,26 +202,26 @@
// This causes the negative annotations to incorrectly have a false positive. TODO: Figure out
// how to annotate this.
NO_THREAD_SAFETY_ANALYSIS;
- void FinishTracing() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
+ void FinishTracing() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
void ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff);
void LogMethodTraceEvent(Thread* thread, ArtMethod* method,
instrumentation::Instrumentation::InstrumentationEvent event,
uint32_t thread_clock_diff, uint32_t wall_clock_diff)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);
// Methods to output traced methods and threads.
void GetVisitedMethods(size_t end_offset, std::set<ArtMethod*>* visited_methods)
REQUIRES(!*unique_methods_lock_);
void DumpMethodList(std::ostream& os, const std::set<ArtMethod*>& visited_methods)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
void DumpThreadList(std::ostream& os) REQUIRES(!Locks::thread_list_lock_);
// Methods to register seen entities in streaming mode. The methods return true if the entity
// is newly discovered.
bool RegisterMethod(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(streaming_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(streaming_lock_);
bool RegisterThread(Thread* thread)
REQUIRES(streaming_lock_);
@@ -235,10 +235,10 @@
REQUIRES(!*unique_methods_lock_);
ArtMethod* DecodeTraceMethod(uint32_t tmid) REQUIRES(!*unique_methods_lock_);
std::string GetMethodLine(ArtMethod* method) REQUIRES(!*unique_methods_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
// Singleton instance of the Trace or null when no method tracing is active.
static Trace* volatile the_trace_ GUARDED_BY(Locks::trace_lock_);
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index d91860b..9f8d981 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -49,13 +49,15 @@
for (auto it : array_logs_) {
array_values_count += it.second.Size();
}
- size_t string_count = intern_string_logs_.size();
+ size_t intern_string_count = intern_string_logs_.size();
+ size_t resolve_string_count = resolve_string_logs_.size();
LOG(INFO) << "Transaction::~Transaction"
<< ": objects_count=" << objects_count
<< ", field_values_count=" << field_values_count
<< ", array_count=" << array_count
<< ", array_values_count=" << array_values_count
- << ", string_count=" << string_count;
+ << ", intern_string_count=" << intern_string_count
+ << ", resolve_string_count=" << resolve_string_count;
}
}
@@ -165,6 +167,13 @@
array_log.LogValue(index, value);
}
+void Transaction::RecordResolveString(mirror::DexCache* dex_cache, uint32_t string_idx) {
+ DCHECK(dex_cache != nullptr);
+ DCHECK_LT(string_idx, dex_cache->GetDexFile()->NumStringIds());
+ MutexLock mu(Thread::Current(), log_lock_);
+ resolve_string_logs_.push_back(ResolveStringLog(dex_cache, string_idx));
+}
+
void Transaction::RecordStrongStringInsertion(mirror::String* s) {
InternStringLog log(s, InternStringLog::kStrongString, InternStringLog::kInsert);
LogInternedString(log);
@@ -200,6 +209,7 @@
UndoObjectModifications();
UndoArrayModifications();
UndoInternStringTableModifications();
+ UndoResolveStringModifications();
}
void Transaction::UndoObjectModifications() {
@@ -230,11 +240,19 @@
intern_string_logs_.clear();
}
+void Transaction::UndoResolveStringModifications() {
+ for (ResolveStringLog& string_log : resolve_string_logs_) {
+ string_log.Undo();
+ }
+ resolve_string_logs_.clear();
+}
+
void Transaction::VisitRoots(RootVisitor* visitor) {
MutexLock mu(Thread::Current(), log_lock_);
VisitObjectLogs(visitor);
VisitArrayLogs(visitor);
- VisitStringLogs(visitor);
+ VisitInternStringLogs(visitor);
+ VisitResolveStringLogs(visitor);
}
void Transaction::VisitObjectLogs(RootVisitor* visitor) {
@@ -292,12 +310,18 @@
}
}
-void Transaction::VisitStringLogs(RootVisitor* visitor) {
+void Transaction::VisitInternStringLogs(RootVisitor* visitor) {
for (InternStringLog& log : intern_string_logs_) {
log.VisitRoots(visitor);
}
}
+void Transaction::VisitResolveStringLogs(RootVisitor* visitor) {
+ for (ResolveStringLog& log : resolve_string_logs_) {
+ log.VisitRoots(visitor);
+ }
+}
+
void Transaction::ObjectLog::LogBooleanValue(MemberOffset offset, uint8_t value, bool is_volatile) {
LogValue(ObjectLog::kBoolean, offset, value, is_volatile);
}
@@ -481,6 +505,21 @@
visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&str_), RootInfo(kRootInternedString));
}
+void Transaction::ResolveStringLog::Undo() {
+ dex_cache_.Read()->ClearString(string_idx_);
+}
+
+Transaction::ResolveStringLog::ResolveStringLog(mirror::DexCache* dex_cache, uint32_t string_idx)
+ : dex_cache_(dex_cache),
+ string_idx_(string_idx) {
+ DCHECK(dex_cache != nullptr);
+ DCHECK_LT(string_idx_, dex_cache->GetDexFile()->NumStringIds());
+}
+
+void Transaction::ResolveStringLog::VisitRoots(RootVisitor* visitor) {
+ dex_cache_.VisitRoot(visitor, RootInfo(kRootVMInternal));
+}
+
void Transaction::ArrayLog::LogValue(size_t index, uint64_t value) {
auto it = array_values_.find(index);
if (it == array_values_.end()) {
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 8ff0614..584dfb8 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -32,6 +32,7 @@
namespace art {
namespace mirror {
class Array;
+class DexCache;
class Object;
class String;
}
@@ -47,10 +48,10 @@
void Abort(const std::string& abort_message)
REQUIRES(!log_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ThrowAbortError(Thread* self, const std::string* abort_message)
REQUIRES(!log_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsAborted() REQUIRES(!log_lock_);
// Record object field changes.
@@ -79,7 +80,7 @@
// Record array change.
void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value)
REQUIRES(!log_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Record intern string table changes.
void RecordStrongStringInsertion(mirror::String* s)
@@ -95,14 +96,19 @@
REQUIRES(Locks::intern_table_lock_)
REQUIRES(!log_lock_);
+ // Record resolve string.
+ void RecordResolveString(mirror::DexCache* dex_cache, uint32_t string_idx)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!log_lock_);
+
// Abort transaction by undoing all recorded changes.
void Rollback()
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!log_lock_);
void VisitRoots(RootVisitor* visitor)
REQUIRES(!log_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
class ObjectLog : public ValueObject {
@@ -115,8 +121,8 @@
void Log64BitsValue(MemberOffset offset, uint64_t value, bool is_volatile);
void LogReferenceValue(MemberOffset offset, mirror::Object* obj, bool is_volatile);
- void Undo(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
- void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+ void Undo(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
+ void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
size_t Size() const {
return field_values_.size();
@@ -141,7 +147,7 @@
void LogValue(FieldValueKind kind, MemberOffset offset, uint64_t value, bool is_volatile);
void UndoFieldWrite(mirror::Object* obj, MemberOffset field_offset,
- const FieldValue& field_value) SHARED_REQUIRES(Locks::mutator_lock_);
+ const FieldValue& field_value) REQUIRES_SHARED(Locks::mutator_lock_);
// Maps field's offset to its value.
std::map<uint32_t, FieldValue> field_values_;
@@ -151,7 +157,7 @@
public:
void LogValue(size_t index, uint64_t value);
- void Undo(mirror::Array* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ void Undo(mirror::Array* obj) REQUIRES_SHARED(Locks::mutator_lock_);
size_t Size() const {
return array_values_.size();
@@ -159,7 +165,7 @@
private:
void UndoArrayWrite(mirror::Array* array, Primitive::Type array_type, size_t index,
- uint64_t value) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint64_t value) REQUIRES_SHARED(Locks::mutator_lock_);
// Maps index to value.
// TODO use JValue instead ?
@@ -182,9 +188,9 @@
}
void Undo(InternTable* intern_table)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::intern_table_lock_);
- void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+ void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
private:
mirror::String* str_;
@@ -192,30 +198,49 @@
const StringOp string_op_;
};
+ class ResolveStringLog : public ValueObject {
+ public:
+ ResolveStringLog(mirror::DexCache* dex_cache, uint32_t string_idx);
+
+ void Undo() REQUIRES_SHARED(Locks::mutator_lock_);
+
+ void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+ GcRoot<mirror::DexCache> dex_cache_;
+ const uint32_t string_idx_;
+ };
+
void LogInternedString(const InternStringLog& log)
REQUIRES(Locks::intern_table_lock_)
REQUIRES(!log_lock_);
void UndoObjectModifications()
REQUIRES(log_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void UndoArrayModifications()
REQUIRES(log_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void UndoInternStringTableModifications()
REQUIRES(Locks::intern_table_lock_)
REQUIRES(log_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void UndoResolveStringModifications()
+ REQUIRES(log_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
void VisitObjectLogs(RootVisitor* visitor)
REQUIRES(log_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void VisitArrayLogs(RootVisitor* visitor)
REQUIRES(log_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
- void VisitStringLogs(RootVisitor* visitor)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void VisitInternStringLogs(RootVisitor* visitor)
REQUIRES(log_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void VisitResolveStringLogs(RootVisitor* visitor)
+ REQUIRES(log_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
const std::string& GetAbortMessage() REQUIRES(!log_lock_);
@@ -223,6 +248,7 @@
std::map<mirror::Object*, ObjectLog> object_logs_ GUARDED_BY(log_lock_);
std::map<mirror::Array*, ArrayLog> array_logs_ GUARDED_BY(log_lock_);
std::list<InternStringLog> intern_string_logs_ GUARDED_BY(log_lock_);
+ std::list<ResolveStringLog> resolve_string_logs_ GUARDED_BY(log_lock_);
bool aborted_ GUARDED_BY(log_lock_);
std::string abort_message_ GUARDED_BY(log_lock_);
diff --git a/runtime/transaction_test.cc b/runtime/transaction_test.cc
index 8279a26..82e529c 100644
--- a/runtime/transaction_test.cc
+++ b/runtime/transaction_test.cc
@@ -20,11 +20,14 @@
#include "art_method-inl.h"
#include "class_linker-inl.h"
#include "common_runtime_test.h"
+#include "dex_file.h"
#include "mirror/array-inl.h"
#include "scoped_thread_state_change.h"
namespace art {
+static const size_t kDexNoIndex = DexFile::kDexNoIndex; // Make copy to prevent linking errors.
+
class TransactionTest : public CommonRuntimeTest {
public:
// Tests failing class initialization due to native call with transaction rollback.
@@ -482,6 +485,55 @@
EXPECT_EQ(objectArray->GetWithoutChecks(0), nullptr);
}
+// Tests rolling back interned strings and resolved strings.
+TEST_F(TransactionTest, ResolveString) {
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("Transaction"))));
+ ASSERT_TRUE(class_loader.Get() != nullptr);
+
+ Handle<mirror::Class> h_klass(
+ hs.NewHandle(class_linker_->FindClass(soa.Self(), "LTransaction$ResolveString;",
+ class_loader)));
+ ASSERT_TRUE(h_klass.Get() != nullptr);
+
+ Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(h_klass->GetDexCache()));
+ ASSERT_TRUE(h_dex_cache.Get() != nullptr);
+ const DexFile* const dex_file = h_dex_cache->GetDexFile();
+ ASSERT_TRUE(dex_file != nullptr);
+
+ // Go search the dex file to find the string id of our string.
+ static const char* kResolvedString = "ResolvedString";
+ const DexFile::StringId* string_id = dex_file->FindStringId(kResolvedString);
+ ASSERT_TRUE(string_id != nullptr);
+ uint32_t string_idx = dex_file->GetIndexForStringId(*string_id);
+ ASSERT_NE(string_idx, kDexNoIndex);
+ // String should only get resolved by the initializer.
+ EXPECT_TRUE(class_linker_->LookupString(*dex_file, string_idx, h_dex_cache) == nullptr);
+ EXPECT_TRUE(h_dex_cache->GetResolvedString(string_idx) == nullptr);
+ // Do the transaction, then roll back.
+ Transaction transaction;
+ Runtime::Current()->EnterTransactionMode(&transaction);
+ bool success = class_linker_->EnsureInitialized(soa.Self(), h_klass, true, true);
+ ASSERT_TRUE(success);
+ ASSERT_TRUE(h_klass->IsInitialized());
+ // Make sure the string got resolved by the transaction.
+ {
+ mirror::String* s = class_linker_->LookupString(*dex_file, string_idx, h_dex_cache);
+ ASSERT_TRUE(s != nullptr);
+ EXPECT_STREQ(s->ToModifiedUtf8().c_str(), kResolvedString);
+ EXPECT_EQ(s, h_dex_cache->GetResolvedString(string_idx));
+ }
+ Runtime::Current()->ExitTransactionMode();
+ transaction.Rollback();
+ // Check that the string did not stay resolved.
+ EXPECT_TRUE(class_linker_->LookupString(*dex_file, string_idx, h_dex_cache) == nullptr);
+ EXPECT_TRUE(h_dex_cache->GetResolvedString(string_idx) == nullptr);
+ ASSERT_FALSE(h_klass->IsInitialized());
+ ASSERT_FALSE(soa.Self()->IsExceptionPending());
+}
+
// Tests successful class initialization without class initializer.
TEST_F(TransactionTest, EmptyClass) {
ScopedObjectAccess soa(Thread::Current());
diff --git a/runtime/utf.h b/runtime/utf.h
index 7c9c333..cbb32fa 100644
--- a/runtime/utf.h
+++ b/runtime/utf.h
@@ -18,7 +18,6 @@
#define ART_RUNTIME_UTF_H_
#include "base/macros.h"
-#include "base/mutex.h"
#include <stddef.h>
#include <stdint.h>
@@ -31,11 +30,6 @@
*/
namespace art {
-namespace mirror {
- template<class T> class PrimitiveArray;
- typedef PrimitiveArray<uint16_t> CharArray;
-} // namespace mirror
-
/*
* Returns the number of UTF-16 characters in the given modified UTF-8 string.
*/
@@ -80,9 +74,6 @@
/*
* The java.lang.String hashCode() algorithm.
*/
-int32_t ComputeUtf16Hash(mirror::CharArray* chars, int32_t offset, size_t char_count)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
template<typename MemoryType>
int32_t ComputeUtf16Hash(const MemoryType* chars, size_t char_count) {
uint32_t hash = 0;
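The hunk above is cut off before the loop body of the surviving template overload, which computes the standard java.lang.String hashCode. For reference, a complete stand-alone sketch of that algorithm (not the ART code verbatim) looks like this:

#include <cstddef>
#include <cstdint>

// Java's String.hashCode(): h = 31*h + c over the UTF-16 code units.
template <typename MemoryType>
int32_t ComputeUtf16Hash(const MemoryType* chars, size_t char_count) {
  uint32_t hash = 0;
  while (char_count-- != 0) {
    hash = hash * 31 + static_cast<uint32_t>(*chars++);
  }
  return static_cast<int32_t>(hash);
}

int main() {
  const uint16_t hello[] = {'h', 'e', 'l', 'l', 'o'};
  // "hello".hashCode() in Java is 99162322.
  return ComputeUtf16Hash(hello, 5) == 99162322 ? 0 : 1;
}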
diff --git a/runtime/utils.cc b/runtime/utils.cc
index b676ae5..313190c 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1155,8 +1155,15 @@
// change process groups, so we don't get reaped by ProcessManager
setpgid(0, 0);
- execv(program, &args[0]);
- PLOG(ERROR) << "Failed to execv(" << command_line << ")";
+ // (b/30160149): protect subprocesses from modifications to LD_LIBRARY_PATH, etc.
+ // Use the snapshot of the environment from the time the runtime was created.
+ char** envp = (Runtime::Current() == nullptr) ? nullptr : Runtime::Current()->GetEnvSnapshot();
+ if (envp == nullptr) {
+ execv(program, &args[0]);
+ } else {
+ execve(program, &args[0], envp);
+ }
+ PLOG(ERROR) << "Failed to execve(" << command_line << ")";
// _exit to avoid atexit handlers in child.
_exit(1);
} else {
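A condensed sketch of the exec behaviour introduced above: the environment is copied once at start-up, and child processes are later spawned with execve() using that copy, so later modifications such as an altered LD_LIBRARY_PATH are not inherited. This is not the ART implementation; apart from the execv/execve fallback mirrored from the hunk above, the names are hypothetical.

#include <string>
#include <vector>
#include <unistd.h>

extern char** environ;

static std::vector<std::string> gEnvStorage;  // Owns the copied "NAME=value" strings.
static std::vector<char*> gEnvSnapshot;       // Null-terminated view passed to execve().

// Call once at start-up, before anything mutates the environment.
void TakeEnvSnapshot() {
  for (char** entry = environ; *entry != nullptr; ++entry) {
    gEnvStorage.emplace_back(*entry);
  }
  for (std::string& s : gEnvStorage) {
    gEnvSnapshot.push_back(&s[0]);
  }
  gEnvSnapshot.push_back(nullptr);
}

// Exec a child with the snapshotted environment, falling back to the live
// environment when no snapshot was taken (mirrors the nullptr check above).
void ExecWithSnapshot(const char* program, char* const argv[]) {
  if (gEnvSnapshot.empty()) {
    execv(program, argv);
  } else {
    execve(program, argv, gEnvSnapshot.data());
  }
}

In the actual change the snapshot lives on the Runtime and is fetched via Runtime::Current()->GetEnvSnapshot(), with execv() used only when no runtime instance exists, exactly as the fallback branch above shows.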
diff --git a/runtime/utils.h b/runtime/utils.h
index 693e0b8..958f0a3 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -134,22 +134,22 @@
// "[[I" would be "int[][]", "[Ljava/lang/String;" would be
// "java.lang.String[]", and so forth.
std::string PrettyDescriptor(mirror::String* descriptor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
std::string PrettyDescriptor(const char* descriptor);
std::string PrettyDescriptor(mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
std::string PrettyDescriptor(Primitive::Type type);
// Returns a human-readable signature for 'f'. Something like "a.b.C.f" or
// "int a.b.C.f" (depending on the value of 'with_type').
std::string PrettyField(ArtField* f, bool with_type = true)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
std::string PrettyField(uint32_t field_idx, const DexFile& dex_file, bool with_type = true);
// Returns a human-readable signature for 'm'. Something like "a.b.C.m" or
// "a.b.C.m(II)V" (depending on the value of 'with_signature').
std::string PrettyMethod(ArtMethod* m, bool with_signature = true)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
std::string PrettyMethod(uint32_t method_idx, const DexFile& dex_file, bool with_signature = true);
// Returns a human-readable form of the name of the *class* of the given object.
@@ -157,7 +157,7 @@
// be "java.lang.String". Given an array of int, the output would be "int[]".
// Given String.class, the output would be "java.lang.Class<java.lang.String>".
std::string PrettyTypeOf(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns a human-readable form of the type at an index in the specified dex file.
// Example outputs: char[], java.lang.String.
@@ -166,11 +166,11 @@
// Returns a human-readable form of the name of the given class.
// Given String.class, the output would be "java.lang.Class<java.lang.String>".
std::string PrettyClass(mirror::Class* c)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns a human-readable form of the name of the given class with its class loader.
std::string PrettyClassAndClassLoader(mirror::Class* c)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns a human-readable version of the Java part of the access flags, e.g., "private static "
// (note the trailing whitespace).
@@ -205,10 +205,10 @@
// Returns the JNI native function name for the non-overloaded method 'm'.
std::string JniShortName(ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the JNI native function name for the overloaded method 'm'.
std::string JniLongName(ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool ReadFileToString(const std::string& file_name, std::string* result);
bool PrintFileToLog(const std::string& file_name, LogSeverity level);
@@ -269,6 +269,9 @@
std::string GetSystemImageFilename(const char* location, InstructionSet isa);
// Wrapper on fork/execv to run a command in a subprocess.
+// Both of these spawn child processes using the environment as it was set when the single instance
+// of the runtime (Runtime::Current()) was started. If no instance of the runtime was started, it
+// will use the current environment settings.
bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg);
int ExecAndReturnCode(std::vector<std::string>& arg_vector, std::string* error_msg);
@@ -308,7 +311,7 @@
return pointer_size == 4 || pointer_size == 8;
}
-void DumpMethodCFG(ArtMethod* method, std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_);
+void DumpMethodCFG(ArtMethod* method, std::ostream& os) REQUIRES_SHARED(Locks::mutator_lock_);
void DumpMethodCFG(const DexFile* dex_file, uint32_t dex_method_idx, std::ostream& os);
static inline const void* EntryPointToCodePointer(const void* entry_point) {
@@ -383,6 +386,16 @@
__builtin___clear_cache(begin, end);
}
+template <typename T>
+constexpr PointerSize ConvertToPointerSize(T any) {
+ if (any == 4 || any == 8) {
+ return static_cast<PointerSize>(any);
+ } else {
+ LOG(FATAL);
+ UNREACHABLE();
+ }
+}
+
} // namespace art
#endif // ART_RUNTIME_UTILS_H_
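For context on the new ConvertToPointerSize() helper above, the sketch below shows the kind of call site it is meant for: turning a byte count known at compile time into the PointerSize enum. It is illustrative only; the enum is reproduced here in simplified form (assumed to match the k32 = 4, k64 = 8 definition in base/enums.h), and the real helper aborts via LOG(FATAL) on values other than 4 or 8 instead of silently defaulting.

#include <cstddef>

// Simplified mirror of the PointerSize enum for this sketch.
enum class PointerSize : size_t { k32 = 4, k64 = 8 };

// Sketch of the helper: the real version LOG(FATAL)s on unexpected sizes.
constexpr PointerSize ConvertToPointerSize(size_t any) {
  return any == 8 ? PointerSize::k64 : PointerSize::k32;
}

// Typical use: derive a PointerSize from sizeof() at compile time.
constexpr PointerSize kHostPointerSize = ConvertToPointerSize(sizeof(void*));
static_assert(kHostPointerSize == PointerSize::k32 ||
              kHostPointerSize == PointerSize::k64,
              "unexpected host pointer size");

int main() { return 0; }

The dex_cache_arrays_layout-inl.h hunk below goes the other way: rather than calling the helper in a constexpr context, GcRootAsPointerSize() now static_asserts that sizeof(GcRoot<T>) is 4 and returns PointerSize::k32 directly.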
diff --git a/runtime/utils/dex_cache_arrays_layout-inl.h b/runtime/utils/dex_cache_arrays_layout-inl.h
index 4c63156..a85d033 100644
--- a/runtime/utils/dex_cache_arrays_layout-inl.h
+++ b/runtime/utils/dex_cache_arrays_layout-inl.h
@@ -55,7 +55,8 @@
template <typename T>
static constexpr PointerSize GcRootAsPointerSize() {
- return ConvertToPointerSize(sizeof(GcRoot<T>));
+ static_assert(sizeof(GcRoot<T>) == 4U, "Unexpected GcRoot size");
+ return PointerSize::k32;
}
inline size_t DexCacheArraysLayout::TypeOffset(uint32_t type_idx) const {
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index 0a01cdb..3ba20a4 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -16,6 +16,8 @@
#include "utils.h"
+#include <stdlib.h>
+
#include "base/enums.h"
#include "class_linker-inl.h"
#include "common_runtime_test.h"
@@ -380,10 +382,59 @@
if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
// Running on valgrind fails due to some memory that leaks in thread alternate signal stacks.
EXPECT_FALSE(Exec(command, &error_msg));
- EXPECT_NE(0U, error_msg.size());
+ EXPECT_FALSE(error_msg.empty());
}
}
+TEST_F(UtilsTest, EnvSnapshotAdditionsAreNotVisible) {
+ static constexpr const char* kModifiedVariable = "EXEC_SHOULD_NOT_EXPORT_THIS";
+ static constexpr int kOverwrite = 1;
+ // Set a variable in the current environment.
+ EXPECT_EQ(setenv(kModifiedVariable, "NEVER", kOverwrite), 0);
+ // Test that it is not exported.
+ std::vector<std::string> command;
+ if (kIsTargetBuild) {
+ std::string android_root(GetAndroidRoot());
+ command.push_back(android_root + "/bin/printenv");
+ } else {
+ command.push_back("/usr/bin/printenv");
+ }
+ command.push_back(kModifiedVariable);
+ std::string error_msg;
+ if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
+ // Running on valgrind fails due to some memory that leaks in thread alternate signal stacks.
+ EXPECT_FALSE(Exec(command, &error_msg));
+ EXPECT_NE(0U, error_msg.size()) << error_msg;
+ }
+}
+
+TEST_F(UtilsTest, EnvSnapshotDeletionsAreNotVisible) {
+ static constexpr const char* kDeletedVariable = "PATH";
+ static constexpr int kOverwrite = 1;
+ // Save the variable's value.
+ const char* save_value = getenv(kDeletedVariable);
+ EXPECT_NE(save_value, nullptr);
+ // Delete the variable.
+ EXPECT_EQ(unsetenv(kDeletedVariable), 0);
+ // Test that it is not exported.
+ std::vector<std::string> command;
+ if (kIsTargetBuild) {
+ std::string android_root(GetAndroidRoot());
+ command.push_back(android_root + "/bin/printenv");
+ } else {
+ command.push_back("/usr/bin/printenv");
+ }
+ command.push_back(kDeletedVariable);
+ std::string error_msg;
+ if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
+ // Running on valgrind fails due to some memory that leaks in thread alternate signal stacks.
+ EXPECT_TRUE(Exec(command, &error_msg));
+ EXPECT_EQ(0U, error_msg.size()) << error_msg;
+ }
+ // Restore the variable's value.
+ EXPECT_EQ(setenv(kDeletedVariable, save_value, kOverwrite), 0);
+}
+
TEST_F(UtilsTest, IsValidDescriptor) {
std::vector<uint8_t> descriptor(
{ 'L', 'a', '/', 'b', '$', 0xed, 0xa0, 0x80, 0xed, 0xb0, 0x80, ';', 0x00 });
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 40f12e9..589e71c 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -159,7 +159,7 @@
&dex_file,
dex_cache,
class_loader,
- class_def,
+ *class_def,
callbacks,
allow_soft_failures,
log_level,
@@ -190,7 +190,7 @@
MethodVerifier::FailureData MethodVerifier::VerifyMethods(Thread* self,
ClassLinker* linker,
const DexFile* dex_file,
- const DexFile::ClassDef* class_def,
+ const DexFile::ClassDef& class_def,
ClassDataItemIterator* it,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
@@ -214,7 +214,7 @@
continue;
}
previous_method_idx = method_idx;
- InvokeType type = it->GetMethodInvokeType(*class_def);
+ InvokeType type = it->GetMethodInvokeType(class_def);
ArtMethod* method = linker->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
*dex_file, method_idx, dex_cache, class_loader, nullptr, type);
if (method == nullptr) {
@@ -247,7 +247,7 @@
} else {
// If we didn't log a hard failure before, print the header of the message.
*error_string += "Verifier rejected class ";
- *error_string += PrettyDescriptor(dex_file->GetClassDescriptor(*class_def));
+ *error_string += PrettyDescriptor(dex_file->GetClassDescriptor(class_def));
*error_string += ":";
}
*error_string += " ";
@@ -264,23 +264,22 @@
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
- const DexFile::ClassDef* class_def,
+ const DexFile::ClassDef& class_def,
CompilerCallbacks* callbacks,
bool allow_soft_failures,
LogSeverity log_level,
std::string* error) {
- DCHECK(class_def != nullptr);
ScopedTrace trace(__FUNCTION__);
// A class must not be abstract and final.
- if ((class_def->access_flags_ & (kAccAbstract | kAccFinal)) == (kAccAbstract | kAccFinal)) {
+ if ((class_def.access_flags_ & (kAccAbstract | kAccFinal)) == (kAccAbstract | kAccFinal)) {
*error = "Verifier rejected class ";
- *error += PrettyDescriptor(dex_file->GetClassDescriptor(*class_def));
+ *error += PrettyDescriptor(dex_file->GetClassDescriptor(class_def));
*error += ": class is abstract and final.";
return kHardFailure;
}
- const uint8_t* class_data = dex_file->GetClassData(*class_def);
+ const uint8_t* class_data = dex_file->GetClassData(class_def);
if (class_data == nullptr) {
// empty class, probably a marker interface
return kNoFailure;
@@ -327,7 +326,7 @@
// warning.
std::string tmp =
StringPrintf("Class %s failed lock verification and will run slower.",
- PrettyDescriptor(dex_file->GetClassDescriptor(*class_def)).c_str());
+ PrettyDescriptor(dex_file->GetClassDescriptor(class_def)).c_str());
if (!gPrintedDxMonitorText) {
tmp = tmp + "\nCommon causes for lock verification issues are non-optimized dex code\n"
"and incorrect proguard optimizations.";
@@ -355,7 +354,7 @@
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
- const DexFile::ClassDef* class_def,
+ const DexFile::ClassDef& class_def,
const DexFile::CodeItem* code_item,
ArtMethod* method,
uint32_t method_access_flags,
@@ -436,7 +435,7 @@
if (callbacks != nullptr) {
// Let the interested party know that we failed the class.
- ClassReference ref(dex_file, dex_file->GetIndexForClassDef(*class_def));
+ ClassReference ref(dex_file, dex_file->GetIndexForClassDef(class_def));
callbacks->ClassRejected(ref);
}
}
@@ -463,7 +462,7 @@
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
- const DexFile::ClassDef* class_def,
+ const DexFile::ClassDef& class_def,
const DexFile::CodeItem* code_item,
ArtMethod* method,
uint32_t method_access_flags) {
@@ -499,7 +498,7 @@
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
- const DexFile::ClassDef* class_def,
+ const DexFile::ClassDef& class_def,
const DexFile::CodeItem* code_item,
uint32_t dex_method_idx,
ArtMethod* method,
@@ -544,7 +543,6 @@
is_constructor_(false),
link_(nullptr) {
self->PushVerifier(this);
- DCHECK(class_def != nullptr);
}
MethodVerifier::~MethodVerifier() {
@@ -561,7 +559,7 @@
m->GetDexFile(),
dex_cache,
class_loader,
- &m->GetClassDef(),
+ m->GetClassDef(),
m->GetCodeItem(),
m->GetDexMethodIndex(),
m,
@@ -616,7 +614,7 @@
m->GetDexFile(),
dex_cache,
class_loader,
- &m->GetClassDef(),
+ m->GetClassDef(),
m->GetCodeItem(),
m->GetDexMethodIndex(),
m,
@@ -656,7 +654,7 @@
m->GetDexFile(),
dex_cache,
class_loader,
- &m->GetClassDef(),
+ m->GetClassDef(),
m->GetCodeItem(),
m->GetDexMethodIndex(),
m,
@@ -761,7 +759,7 @@
return false;
}
}
- if ((class_def_->GetJavaAccessFlags() & kAccInterface) != 0) {
+ if ((class_def_.GetJavaAccessFlags() & kAccInterface) != 0) {
// Interface methods must be public and abstract (if default methods are disabled).
uint32_t kRequired = kAccPublic;
if ((method_access_flags_ & kRequired) != kRequired) {
@@ -792,7 +790,7 @@
return false;
}
- if ((class_def_->GetJavaAccessFlags() & kAccInterface) != 0) {
+ if ((class_def_.GetJavaAccessFlags() & kAccInterface) != 0) {
// Interfaces may always have static initializers for their fields. If we are running with
// default methods enabled we also allow other public, static, non-final methods to have code.
// Otherwise that is the only type of method allowed.
@@ -3986,7 +3984,7 @@
++pos_;
}
- const char* GetDescriptor() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const char* GetDescriptor() REQUIRES_SHARED(Locks::mutator_lock_) {
return res_method_->GetTypeDescriptorFromTypeIdx(params_->GetTypeItem(pos_).type_idx_);
}
@@ -4023,7 +4021,7 @@
}
if (reference_class->IsInterface()) {
// TODO Can we verify anything else.
- if (class_idx == class_def_->class_idx_) {
+ if (class_idx == class_def_.class_idx_) {
Fail(VERIFY_ERROR_CLASS_CHANGE) << "Cannot invoke-super on self as interface";
return nullptr;
}
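The method_verifier changes above are mostly an interface refactor: class_def is now passed and stored as a const DexFile::ClassDef& instead of a pointer, which encodes the never-null contract in the type and lets the DCHECK(class_def != nullptr) checks go away. A generic sketch of that pattern follows; it is not ART code, and the Widget/Verifier names are made up.

#include <cassert>
#include <string>

struct Widget {
  std::string name;
};

// Before: pointer parameter, nullness must be checked at every entry point.
class VerifierBefore {
 public:
  explicit VerifierBefore(const Widget* widget) : widget_(widget) {
    assert(widget != nullptr);
  }
  const std::string& Name() const { return widget_->name; }

 private:
  const Widget* const widget_;
};

// After: reference parameter, a null argument is impossible by construction,
// so the runtime checks disappear and members dereference unconditionally.
class VerifierAfter {
 public:
  explicit VerifierAfter(const Widget& widget) : widget_(widget) {}
  const std::string& Name() const { return widget_.name; }

 private:
  const Widget& widget_;
};

int main() {
  Widget w{"example"};
  VerifierBefore before(&w);
  VerifierAfter after(w);
  return before.Name() == after.Name() ? 0 : 1;
}

Call sites change accordingly, from &m->GetClassDef() to m->GetClassDef(), as the hunks above show.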
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 5fe95c2..d4e12f7 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -154,17 +154,17 @@
bool allow_soft_failures,
LogSeverity log_level,
std::string* error)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static FailureKind VerifyClass(Thread* self,
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
- const DexFile::ClassDef* class_def,
+ const DexFile::ClassDef& class_def,
CompilerCallbacks* callbacks,
bool allow_soft_failures,
LogSeverity log_level,
std::string* error)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static MethodVerifier* VerifyMethodAndDump(Thread* self,
VariableIndentationOutputStream* vios,
@@ -172,10 +172,10 @@
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
- const DexFile::ClassDef* class_def,
+ const DexFile::ClassDef& class_def,
const DexFile::CodeItem* code_item, ArtMethod* method,
uint32_t method_access_flags)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
uint8_t EncodePcToReferenceMapData() const;
@@ -198,26 +198,26 @@
// Dump the state of the verifier, namely each instruction, what flags are set on it, register
// information
- void Dump(std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_);
- void Dump(VariableIndentationOutputStream* vios) SHARED_REQUIRES(Locks::mutator_lock_);
+ void Dump(std::ostream& os) REQUIRES_SHARED(Locks::mutator_lock_);
+ void Dump(VariableIndentationOutputStream* vios) REQUIRES_SHARED(Locks::mutator_lock_);
// Fills 'monitor_enter_dex_pcs' with the dex pcs of the monitor-enter instructions corresponding
// to the locks held at 'dex_pc' in method 'm'.
static void FindLocksAtDexPc(ArtMethod* m, uint32_t dex_pc,
std::vector<uint32_t>* monitor_enter_dex_pcs)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the accessed field corresponding to the quick instruction's field
// offset at 'dex_pc' in method 'm'.
static ArtField* FindAccessedFieldAtDexPc(ArtMethod* m, uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the invoked method corresponding to the quick instruction's vtable
// index at 'dex_pc' in method 'm'.
static ArtMethod* FindInvokedMethodAtDexPc(ArtMethod* m, uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- static void Init() SHARED_REQUIRES(Locks::mutator_lock_);
+ static void Init() REQUIRES_SHARED(Locks::mutator_lock_);
static void Shutdown();
bool CanLoadClasses() const {
@@ -228,24 +228,24 @@
// Run verification on the method. Returns true if verification completes and false if the input
// has an irrecoverable corruption.
- bool Verify() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool Verify() REQUIRES_SHARED(Locks::mutator_lock_);
// Describe VRegs at the given dex pc.
std::vector<int32_t> DescribeVRegs(uint32_t dex_pc);
static void VisitStaticRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void VisitRoots(RootVisitor* visitor, const RootInfo& roots)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Accessors used by the compiler via CompilerCallback
const DexFile::CodeItem* CodeItem() const;
RegisterLine* GetRegLine(uint32_t dex_pc);
ALWAYS_INLINE const InstructionFlags& GetInstructionFlags(size_t index) const;
ALWAYS_INLINE InstructionFlags& GetInstructionFlags(size_t index);
- mirror::ClassLoader* GetClassLoader() SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
- ArtMethod* GetMethod() const SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::ClassLoader* GetClassLoader() REQUIRES_SHARED(Locks::mutator_lock_);
+ mirror::DexCache* GetDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
+ ArtMethod* GetMethod() const REQUIRES_SHARED(Locks::mutator_lock_);
MethodReference GetMethodReference() const;
uint32_t GetAccessFlags() const;
bool HasCheckCasts() const;
@@ -256,15 +256,15 @@
}
const RegType& ResolveCheckedClass(uint32_t class_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the method of a quick invoke or null if it cannot be found.
ArtMethod* GetQuickInvokedMethod(const Instruction* inst, RegisterLine* reg_line,
bool is_range, bool allow_failure)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the access field of a quick field access (iget/iput-quick) or null
// if it cannot be found.
ArtField* GetQuickFieldAccess(const Instruction* inst, RegisterLine* reg_line)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t GetEncounteredFailureTypes() {
return encountered_failure_types_;
@@ -283,7 +283,7 @@
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
- const DexFile::ClassDef* class_def,
+ const DexFile::ClassDef& class_def,
const DexFile::CodeItem* code_item,
uint32_t method_idx,
ArtMethod* method,
@@ -293,10 +293,10 @@
bool need_precise_constants,
bool verify_to_dump,
bool allow_thread_suspension)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void UninstantiableError(const char* descriptor);
- static bool IsInstantiableOrPrimitive(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ static bool IsInstantiableOrPrimitive(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
// Is the method being verified a constructor? See the comment on the field.
bool IsConstructor() const {
@@ -330,7 +330,7 @@
static FailureData VerifyMethods(Thread* self,
ClassLinker* linker,
const DexFile* dex_file,
- const DexFile::ClassDef* class_def,
+ const DexFile::ClassDef& class_def,
ClassDataItemIterator* it,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
@@ -339,7 +339,7 @@
LogSeverity log_level,
bool need_precise_constants,
std::string* error_string)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Perform verification on a single method.
@@ -356,7 +356,7 @@
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
- const DexFile::ClassDef* class_def_idx,
+ const DexFile::ClassDef& class_def_idx,
const DexFile::CodeItem* code_item,
ArtMethod* method,
uint32_t method_access_flags,
@@ -365,18 +365,18 @@
LogSeverity log_level,
bool need_precise_constants,
std::string* hard_failure_msg)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- void FindLocksAtDexPc() SHARED_REQUIRES(Locks::mutator_lock_);
+ void FindLocksAtDexPc() REQUIRES_SHARED(Locks::mutator_lock_);
ArtField* FindAccessedFieldAtDexPc(uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindInvokedMethodAtDexPc(uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
SafeMap<uint32_t, std::set<uint32_t>>& FindStringInitMap()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Compute the width of the instruction at each address in the instruction stream, and store it in
@@ -404,7 +404,7 @@
* Returns "false" if something in the exception table looks fishy, but we're expecting the
* exception table to be somewhat sane.
*/
- bool ScanTryCatchBlocks() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool ScanTryCatchBlocks() REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Perform static verification on all instructions in a method.
@@ -510,11 +510,11 @@
bool* selfOkay);
/* Perform detailed code-flow analysis on a single method. */
- bool VerifyCodeFlow() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool VerifyCodeFlow() REQUIRES_SHARED(Locks::mutator_lock_);
// Set the register types for the first instruction in the method based on the method signature.
// This has the side-effect of validating the signature.
- bool SetTypesFromSignature() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool SetTypesFromSignature() REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Perform code flow on a method.
@@ -562,7 +562,7 @@
* reordering by specifying that you can't execute the new-instance instruction if a register
* contains an uninitialized instance created by that same instruction.
*/
- bool CodeFlowVerifyMethod() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool CodeFlowVerifyMethod() REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Perform verification for a single instruction.
@@ -574,33 +574,33 @@
* addresses. Does not set or clear any other flags in "insn_flags_".
*/
bool CodeFlowVerifyInstruction(uint32_t* start_guess)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Perform verification of a new array instruction
void VerifyNewArray(const Instruction* inst, bool is_filled, bool is_range)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Helper to perform verification on puts of primitive type.
void VerifyPrimitivePut(const RegType& target_type, const RegType& insn_type,
- const uint32_t vregA) SHARED_REQUIRES(Locks::mutator_lock_);
+ const uint32_t vregA) REQUIRES_SHARED(Locks::mutator_lock_);
// Perform verification of an aget instruction. The destination register's type will be set to
// be that of component type of the array unless the array type is unknown, in which case a
// bottom type inferred from the type of instruction is used. is_primitive is false for an
// aget-object.
void VerifyAGet(const Instruction* inst, const RegType& insn_type,
- bool is_primitive) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool is_primitive) REQUIRES_SHARED(Locks::mutator_lock_);
// Perform verification of an aput instruction.
void VerifyAPut(const Instruction* inst, const RegType& insn_type,
- bool is_primitive) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool is_primitive) REQUIRES_SHARED(Locks::mutator_lock_);
// Lookup instance field and fail for resolution violations
ArtField* GetInstanceField(const RegType& obj_type, int field_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Lookup static field and fail for resolution violations
- ArtField* GetStaticField(int field_idx) SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtField* GetStaticField(int field_idx) REQUIRES_SHARED(Locks::mutator_lock_);
// Perform verification of an iget/sget/iput/sput instruction.
enum class FieldAccessType { // private
@@ -610,16 +610,16 @@
template <FieldAccessType kAccType>
void VerifyISFieldAccess(const Instruction* inst, const RegType& insn_type,
bool is_primitive, bool is_static)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <FieldAccessType kAccType>
void VerifyQuickFieldAccess(const Instruction* inst, const RegType& insn_type, bool is_primitive)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Resolves a class based on an index and performs access checks to ensure the referrer can
// access the resolved class.
const RegType& ResolveClassAndCheckAccess(uint32_t class_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* For the "move-exception" instruction at "work_insn_idx_", which must be at an exception handler
@@ -627,7 +627,7 @@
* exception handler can be found or if the Join of exception types fails.
*/
const RegType& GetCaughtExceptionType()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Resolves a method based on an index and performs access checks to ensure
@@ -635,7 +635,7 @@
* Does not throw exceptions.
*/
ArtMethod* ResolveMethodAndCheckAccess(uint32_t method_idx, MethodType method_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Verify the arguments to a method. We're executing in "method", making
@@ -660,22 +660,22 @@
* set appropriately).
*/
ArtMethod* VerifyInvocationArgs(const Instruction* inst, MethodType method_type, bool is_range)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Similar checks to the above, but on the proto. Will be used when the method cannot be
// resolved.
void VerifyInvocationArgsUnresolvedMethod(const Instruction* inst, MethodType method_type,
bool is_range)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <class T>
ArtMethod* VerifyInvocationArgsFromIterator(T* it, const Instruction* inst,
MethodType method_type, bool is_range,
ArtMethod* res_method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* VerifyInvokeVirtualQuickArgs(const Instruction* inst, bool is_range)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Verify that the target instruction is not "move-exception". It's important that the only way
@@ -707,18 +707,18 @@
* Returns "false" if an error is encountered.
*/
bool UpdateRegisters(uint32_t next_insn, RegisterLine* merge_line, bool update_merge_line)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Return the register type for the method.
- const RegType& GetMethodReturnType() SHARED_REQUIRES(Locks::mutator_lock_);
+ const RegType& GetMethodReturnType() REQUIRES_SHARED(Locks::mutator_lock_);
// Get a type representing the declaring class of the method.
- const RegType& GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ const RegType& GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
InstructionFlags* CurrentInsnFlags();
const RegType& DetermineCat1Constant(int32_t value, bool precise)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Try to create a register type from the given class. In case a precise type is requested, but
// the class is not instantiable, a soft error (of type NO_CLASS) will be enqueued and a
@@ -726,7 +726,7 @@
// Note: we reuse NO_CLASS as this will throw an exception at runtime, when the failing class is
// actually touched.
const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// The thread we're verifying on.
Thread* const self_;
@@ -759,7 +759,7 @@
Handle<mirror::DexCache> dex_cache_ GUARDED_BY(Locks::mutator_lock_);
// The class loader for the declaring class of the method.
Handle<mirror::ClassLoader> class_loader_ GUARDED_BY(Locks::mutator_lock_);
- const DexFile::ClassDef* const class_def_; // The class def of the declaring class of the method.
+ const DexFile::ClassDef& class_def_; // The class def of the declaring class of the method.
const DexFile::CodeItem* const code_item_; // The code item containing the code for the method.
const RegType* declaring_class_; // Lazily computed reg type of the method's declaring class.
// Instruction widths and flags, one entry per code unit.
diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc
index b036313..4fd581d 100644
--- a/runtime/verifier/method_verifier_test.cc
+++ b/runtime/verifier/method_verifier_test.cc
@@ -30,7 +30,7 @@
class MethodVerifierTest : public CommonRuntimeTest {
protected:
void VerifyClass(const std::string& descriptor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ASSERT_TRUE(descriptor != nullptr);
Thread* self = Thread::Current();
mirror::Class* klass = class_linker_->FindSystemClass(self, descriptor.c_str());
@@ -47,7 +47,7 @@
}
void VerifyDexFile(const DexFile& dex)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Verify all the classes defined in this file
for (size_t i = 0; i < dex.NumClassDefs(); i++) {
const DexFile::ClassDef& class_def = dex.GetClassDef(i);
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 85daba9..5c19969 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -279,7 +279,7 @@
}
}
-std::string UndefinedType::Dump() const SHARED_REQUIRES(Locks::mutator_lock_) {
+std::string UndefinedType::Dump() const REQUIRES_SHARED(Locks::mutator_lock_) {
return "Undefined";
}
@@ -517,11 +517,11 @@
}
}
-bool RegType::IsJavaLangObject() const SHARED_REQUIRES(Locks::mutator_lock_) {
+bool RegType::IsJavaLangObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
return IsReference() && GetClass()->IsObjectClass();
}
-bool RegType::IsObjectArrayTypes() const SHARED_REQUIRES(Locks::mutator_lock_) {
+bool RegType::IsObjectArrayTypes() const REQUIRES_SHARED(Locks::mutator_lock_) {
if (IsUnresolvedTypes()) {
DCHECK(!IsUnresolvedMergedReference());
@@ -542,7 +542,7 @@
}
}
-bool RegType::IsArrayTypes() const SHARED_REQUIRES(Locks::mutator_lock_) {
+bool RegType::IsArrayTypes() const REQUIRES_SHARED(Locks::mutator_lock_) {
if (IsUnresolvedTypes()) {
DCHECK(!IsUnresolvedMergedReference());
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 4837490..c3ed77a 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -35,6 +35,7 @@
namespace art {
namespace mirror {
class Class;
+class ClassLoader;
} // namespace mirror
class ArenaBitVector;
@@ -118,7 +119,7 @@
}
// The high half that corresponds to this low half
const RegType& HighHalf(RegTypeCache* cache) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsConstantBoolean() const;
virtual bool IsConstantChar() const { return false; }
@@ -171,20 +172,20 @@
return result;
}
virtual bool HasClassVirtual() const { return false; }
- bool IsJavaLangObject() const SHARED_REQUIRES(Locks::mutator_lock_);
- virtual bool IsArrayTypes() const SHARED_REQUIRES(Locks::mutator_lock_);
- virtual bool IsObjectArrayTypes() const SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsJavaLangObject() const REQUIRES_SHARED(Locks::mutator_lock_);
+ virtual bool IsArrayTypes() const REQUIRES_SHARED(Locks::mutator_lock_);
+ virtual bool IsObjectArrayTypes() const REQUIRES_SHARED(Locks::mutator_lock_);
Primitive::Type GetPrimitiveType() const;
bool IsJavaLangObjectArray() const
- SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsInstantiableTypes() const SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsInstantiableTypes() const REQUIRES_SHARED(Locks::mutator_lock_);
const StringPiece& GetDescriptor() const {
DCHECK(HasClass() ||
(IsUnresolvedTypes() && !IsUnresolvedMergedReference() &&
!IsUnresolvedSuperClass()));
return descriptor_;
}
- mirror::Class* GetClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Class* GetClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!IsUnresolvedReference());
DCHECK(!klass_.IsNull()) << Dump();
DCHECK(HasClass());
@@ -192,25 +193,25 @@
}
uint16_t GetId() const { return cache_id_; }
const RegType& GetSuperClass(RegTypeCache* cache) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
virtual std::string Dump() const
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Can this type access other?
bool CanAccess(const RegType& other) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can this type access a member with the given properties?
bool CanAccessMember(mirror::Class* klass, uint32_t access_flags) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can this type be assigned by src?
// Note: Object and interface types may always be assigned to one another, see
// comment on
// ClassJoin.
bool IsAssignableFrom(const RegType& src) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can this array type potentially be assigned by src.
// This function is necessary as array types are valid even if their components types are not,
@@ -221,13 +222,13 @@
// (both are reference types).
bool CanAssignArray(const RegType& src, RegTypeCache& reg_types,
Handle<mirror::ClassLoader> class_loader, bool* soft_error) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can this type be assigned by src? Variant of IsAssignableFrom that doesn't
// allow assignment to
// an interface from an Object.
bool IsStrictlyAssignableFrom(const RegType& src) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Are these RegTypes the same?
bool Equals(const RegType& other) const { return GetId() == other.GetId(); }
@@ -235,10 +236,10 @@
// Compute the merge of this register from one edge (path) with incoming_type
// from another.
const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Same as above, but also handles the case where incoming_type == this.
const RegType& SafeMerge(const RegType& incoming_type, RegTypeCache* reg_types) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (Equals(incoming_type)) {
return *this;
}
@@ -262,12 +263,12 @@
* [1] Java bytecode verification: algorithms and formalizations, Xavier Leroy
*/
static mirror::Class* ClassJoin(mirror::Class* s, mirror::Class* t)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
virtual ~RegType() {}
void VisitRoots(RootVisitor* visitor, const RootInfo& root_info) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void* operator new(size_t size) noexcept {
return ::operator new(size);
@@ -279,7 +280,7 @@
protected:
RegType(mirror::Class* klass,
const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: descriptor_(descriptor),
klass_(klass),
cache_id_(cache_id) {
@@ -288,7 +289,7 @@
}
}
- void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
const StringPiece descriptor_;
mutable GcRoot<mirror::Class> klass_; // Non-const only due to moving classes.
@@ -298,7 +299,7 @@
private:
static bool AssignableFrom(const RegType& lhs, const RegType& rhs, bool strict)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
DISALLOW_COPY_AND_ASSIGN(RegType);
};
@@ -308,7 +309,7 @@
public:
bool IsConflict() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
// Get the singleton Conflict instance.
static const ConflictType* GetInstance() PURE;
@@ -317,14 +318,14 @@
static const ConflictType* CreateInstance(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Destroy the singleton instance.
static void Destroy();
private:
ConflictType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: RegType(klass, descriptor, cache_id) {}
static const ConflictType* instance_;
@@ -337,7 +338,7 @@
public:
bool IsUndefined() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
// Get the singleton Undefined instance.
static const UndefinedType* GetInstance() PURE;
@@ -346,14 +347,14 @@
static const UndefinedType* CreateInstance(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Destroy the singleton instance.
static void Destroy();
private:
UndefinedType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: RegType(klass, descriptor, cache_id) {}
static const UndefinedType* instance_;
@@ -362,7 +363,7 @@
class PrimitiveType : public RegType {
public:
PrimitiveType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_);
bool HasClassVirtual() const OVERRIDE { return true; }
};
@@ -370,23 +371,23 @@
class Cat1Type : public PrimitiveType {
public:
Cat1Type(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_);
};
class IntegerType : public Cat1Type {
public:
bool IsInteger() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
static const IntegerType* CreateInstance(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static const IntegerType* GetInstance() PURE;
static void Destroy();
private:
IntegerType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const IntegerType* instance_;
};
@@ -394,17 +395,17 @@
class BooleanType FINAL : public Cat1Type {
public:
bool IsBoolean() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
static const BooleanType* CreateInstance(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static const BooleanType* GetInstance() PURE;
static void Destroy();
private:
BooleanType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const BooleanType* instance_;
@@ -413,17 +414,17 @@
class ByteType FINAL : public Cat1Type {
public:
bool IsByte() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
static const ByteType* CreateInstance(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static const ByteType* GetInstance() PURE;
static void Destroy();
private:
ByteType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const ByteType* instance_;
};
@@ -431,17 +432,17 @@
class ShortType FINAL : public Cat1Type {
public:
bool IsShort() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
static const ShortType* CreateInstance(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static const ShortType* GetInstance() PURE;
static void Destroy();
private:
ShortType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const ShortType* instance_;
};
@@ -449,17 +450,17 @@
class CharType FINAL : public Cat1Type {
public:
bool IsChar() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
static const CharType* CreateInstance(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static const CharType* GetInstance() PURE;
static void Destroy();
private:
CharType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const CharType* instance_;
};
@@ -467,17 +468,17 @@
class FloatType FINAL : public Cat1Type {
public:
bool IsFloat() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
static const FloatType* CreateInstance(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static const FloatType* GetInstance() PURE;
static void Destroy();
private:
FloatType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const FloatType* instance_;
};
@@ -485,86 +486,86 @@
class Cat2Type : public PrimitiveType {
public:
Cat2Type(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_);
};
class LongLoType FINAL : public Cat2Type {
public:
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
bool IsLongLo() const OVERRIDE { return true; }
bool IsLong() const OVERRIDE { return true; }
static const LongLoType* CreateInstance(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static const LongLoType* GetInstance() PURE;
static void Destroy();
private:
LongLoType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: Cat2Type(klass, descriptor, cache_id) {}
static const LongLoType* instance_;
};
class LongHiType FINAL : public Cat2Type {
public:
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
bool IsLongHi() const OVERRIDE { return true; }
static const LongHiType* CreateInstance(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static const LongHiType* GetInstance() PURE;
static void Destroy();
private:
LongHiType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: Cat2Type(klass, descriptor, cache_id) {}
static const LongHiType* instance_;
};
class DoubleLoType FINAL : public Cat2Type {
public:
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
bool IsDoubleLo() const OVERRIDE { return true; }
bool IsDouble() const OVERRIDE { return true; }
static const DoubleLoType* CreateInstance(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static const DoubleLoType* GetInstance() PURE;
static void Destroy();
private:
DoubleLoType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: Cat2Type(klass, descriptor, cache_id) {}
static const DoubleLoType* instance_;
};
class DoubleHiType FINAL : public Cat2Type {
public:
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
virtual bool IsDoubleHi() const OVERRIDE { return true; }
static const DoubleHiType* CreateInstance(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static const DoubleHiType* GetInstance() PURE;
static void Destroy();
private:
DoubleHiType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: Cat2Type(klass, descriptor, cache_id) {}
static const DoubleHiType* instance_;
};
class ConstantType : public RegType {
public:
- ConstantType(uint32_t constant, uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ ConstantType(uint32_t constant, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: RegType(nullptr, "", cache_id), constant_(constant) {
}
@@ -622,58 +623,58 @@
class PreciseConstType FINAL : public ConstantType {
public:
PreciseConstType(uint32_t constant, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {}
bool IsPreciseConstant() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
class PreciseConstLoType FINAL : public ConstantType {
public:
PreciseConstLoType(uint32_t constant, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {}
bool IsPreciseConstantLo() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
class PreciseConstHiType FINAL : public ConstantType {
public:
PreciseConstHiType(uint32_t constant, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {}
bool IsPreciseConstantHi() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
class ImpreciseConstType FINAL : public ConstantType {
public:
ImpreciseConstType(uint32_t constat, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constat, cache_id) {
}
bool IsImpreciseConstant() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
class ImpreciseConstLoType FINAL : public ConstantType {
public:
ImpreciseConstLoType(uint32_t constant, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {}
bool IsImpreciseConstantLo() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
class ImpreciseConstHiType FINAL : public ConstantType {
public:
ImpreciseConstHiType(uint32_t constant, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {}
bool IsImpreciseConstantHi() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
// Common parent of all uninitialized types. Uninitialized types are created by
@@ -703,14 +704,14 @@
UninitializedReferenceType(mirror::Class* klass,
const StringPiece& descriptor,
uint32_t allocation_pc, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: UninitializedType(klass, descriptor, allocation_pc, cache_id) {}
bool IsUninitializedReference() const OVERRIDE { return true; }
bool HasClassVirtual() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
// Similar to UnresolvedReferenceType but not yet having been passed to a
@@ -719,7 +720,7 @@
public:
UnresolvedUninitializedRefType(const StringPiece& descriptor,
uint32_t allocation_pc, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: UninitializedType(nullptr, descriptor, allocation_pc, cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
@@ -730,10 +731,10 @@
bool IsUnresolvedTypes() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
};
// Similar to UninitializedReferenceType but special case for the this argument
@@ -743,7 +744,7 @@
UninitializedThisReferenceType(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: UninitializedType(klass, descriptor, 0, cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
@@ -754,17 +755,17 @@
bool HasClassVirtual() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
};
class UnresolvedUninitializedThisRefType FINAL : public UninitializedType {
public:
UnresolvedUninitializedThisRefType(const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: UninitializedType(nullptr, descriptor, 0, cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
@@ -775,10 +776,10 @@
bool IsUnresolvedTypes() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
};
// A type of register holding a reference to an Object of type GetClass or a
@@ -786,7 +787,7 @@
class ReferenceType FINAL : public RegType {
public:
ReferenceType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: RegType(klass, descriptor, cache_id) {}
bool IsReference() const OVERRIDE { return true; }
@@ -795,7 +796,7 @@
bool HasClassVirtual() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
// A type of register holding a reference to an Object of type GetClass and only
@@ -805,7 +806,7 @@
public:
PreciseReferenceType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsPreciseReference() const OVERRIDE { return true; }
@@ -813,14 +814,14 @@
bool HasClassVirtual() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
// Common parent of unresolved types.
class UnresolvedType : public RegType {
public:
UnresolvedType(const StringPiece& descriptor, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: RegType(nullptr, descriptor, cache_id) {}
bool IsNonZeroReferenceTypes() const OVERRIDE;
@@ -832,7 +833,7 @@
class UnresolvedReferenceType FINAL : public UnresolvedType {
public:
UnresolvedReferenceType(const StringPiece& descriptor, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: UnresolvedType(descriptor, cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
@@ -843,10 +844,10 @@
bool IsUnresolvedTypes() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
};
// Type representing the super-class of an unresolved type.
@@ -854,7 +855,7 @@
public:
UnresolvedSuperClass(uint16_t child_id, RegTypeCache* reg_type_cache,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: UnresolvedType("", cache_id),
unresolved_child_id_(child_id),
reg_type_cache_(reg_type_cache) {
@@ -872,10 +873,10 @@
return static_cast<uint16_t>(unresolved_child_id_ & 0xFFFF);
}
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
const uint16_t unresolved_child_id_;
const RegTypeCache* const reg_type_cache_;
@@ -890,7 +891,7 @@
const BitVector& unresolved,
const RegTypeCache* reg_type_cache,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// The resolved part. See description below.
const RegType& GetResolvedPart() const {
@@ -905,13 +906,13 @@
bool IsUnresolvedTypes() const OVERRIDE { return true; }
- bool IsArrayTypes() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsObjectArrayTypes() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsArrayTypes() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsObjectArrayTypes() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
const RegTypeCache* const reg_type_cache_;
@@ -927,7 +928,7 @@
};
std::ostream& operator<<(std::ostream& os, const RegType& rhs)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
} // namespace verifier
} // namespace art
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 71c2a90..4d4886e 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -36,7 +36,7 @@
kMinSmallConstant + 1];
ALWAYS_INLINE static inline bool MatchingPrecisionForClass(const RegType* entry, bool precise)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (entry->IsPreciseReference() == precise) {
// We were or weren't looking for a precise reference and we found what we need.
return true;
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 6f9a04e..14d9509 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -46,7 +46,7 @@
public:
explicit RegTypeCache(bool can_load_classes, ScopedArenaAllocator& arena);
~RegTypeCache();
- static void Init() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static void Init() REQUIRES_SHARED(Locks::mutator_lock_) {
if (!RegTypeCache::primitive_initialized_) {
CHECK_EQ(RegTypeCache::primitive_count_, 0);
CreatePrimitiveAndSmallConstantTypes();
@@ -57,114 +57,114 @@
static void ShutDown();
const art::verifier::RegType& GetFromId(uint16_t id) const;
const RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Find a RegType, returns null if not found.
const RegType* FindClass(mirror::Class* klass, bool precise) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Insert a new class with a specified descriptor, must not already be in the cache.
const RegType* InsertClass(const StringPiece& descriptor, mirror::Class* klass, bool precise)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get or insert a reg type for a description, klass, and precision.
const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const ConstantType& FromCat1Const(int32_t value, bool precise)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const ConstantType& FromCat2ConstLo(int32_t value, bool precise)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const ConstantType& FromCat2ConstHi(int32_t value, bool precise)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const RegType& FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const RegType& FromUnresolvedMerge(const RegType& left, const RegType& right)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const RegType& FromUnresolvedSuperClass(const RegType& child)
- SHARED_REQUIRES(Locks::mutator_lock_);
- const ConstantType& Zero() SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ const ConstantType& Zero() REQUIRES_SHARED(Locks::mutator_lock_) {
return FromCat1Const(0, true);
}
- const ConstantType& One() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const ConstantType& One() REQUIRES_SHARED(Locks::mutator_lock_) {
return FromCat1Const(1, true);
}
size_t GetCacheSize() {
return entries_.size();
}
- const BooleanType& Boolean() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const BooleanType& Boolean() REQUIRES_SHARED(Locks::mutator_lock_) {
return *BooleanType::GetInstance();
}
- const ByteType& Byte() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const ByteType& Byte() REQUIRES_SHARED(Locks::mutator_lock_) {
return *ByteType::GetInstance();
}
- const CharType& Char() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const CharType& Char() REQUIRES_SHARED(Locks::mutator_lock_) {
return *CharType::GetInstance();
}
- const ShortType& Short() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const ShortType& Short() REQUIRES_SHARED(Locks::mutator_lock_) {
return *ShortType::GetInstance();
}
- const IntegerType& Integer() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const IntegerType& Integer() REQUIRES_SHARED(Locks::mutator_lock_) {
return *IntegerType::GetInstance();
}
- const FloatType& Float() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const FloatType& Float() REQUIRES_SHARED(Locks::mutator_lock_) {
return *FloatType::GetInstance();
}
- const LongLoType& LongLo() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const LongLoType& LongLo() REQUIRES_SHARED(Locks::mutator_lock_) {
return *LongLoType::GetInstance();
}
- const LongHiType& LongHi() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const LongHiType& LongHi() REQUIRES_SHARED(Locks::mutator_lock_) {
return *LongHiType::GetInstance();
}
- const DoubleLoType& DoubleLo() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const DoubleLoType& DoubleLo() REQUIRES_SHARED(Locks::mutator_lock_) {
return *DoubleLoType::GetInstance();
}
- const DoubleHiType& DoubleHi() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const DoubleHiType& DoubleHi() REQUIRES_SHARED(Locks::mutator_lock_) {
return *DoubleHiType::GetInstance();
}
- const UndefinedType& Undefined() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const UndefinedType& Undefined() REQUIRES_SHARED(Locks::mutator_lock_) {
return *UndefinedType::GetInstance();
}
const ConflictType& Conflict() {
return *ConflictType::GetInstance();
}
- const PreciseReferenceType& JavaLangClass() SHARED_REQUIRES(Locks::mutator_lock_);
- const PreciseReferenceType& JavaLangString() SHARED_REQUIRES(Locks::mutator_lock_);
- const RegType& JavaLangThrowable(bool precise) SHARED_REQUIRES(Locks::mutator_lock_);
- const RegType& JavaLangObject(bool precise) SHARED_REQUIRES(Locks::mutator_lock_);
+ const PreciseReferenceType& JavaLangClass() REQUIRES_SHARED(Locks::mutator_lock_);
+ const PreciseReferenceType& JavaLangString() REQUIRES_SHARED(Locks::mutator_lock_);
+ const RegType& JavaLangThrowable(bool precise) REQUIRES_SHARED(Locks::mutator_lock_);
+ const RegType& JavaLangObject(bool precise) REQUIRES_SHARED(Locks::mutator_lock_);
const UninitializedType& Uninitialized(const RegType& type, uint32_t allocation_pc)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Create an uninitialized 'this' argument for the given type.
const UninitializedType& UninitializedThisArgument(const RegType& type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const RegType& FromUninitialized(const RegType& uninit_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
- const ImpreciseConstType& ByteConstant() SHARED_REQUIRES(Locks::mutator_lock_);
- const ImpreciseConstType& CharConstant() SHARED_REQUIRES(Locks::mutator_lock_);
- const ImpreciseConstType& ShortConstant() SHARED_REQUIRES(Locks::mutator_lock_);
- const ImpreciseConstType& IntConstant() SHARED_REQUIRES(Locks::mutator_lock_);
- const ImpreciseConstType& PosByteConstant() SHARED_REQUIRES(Locks::mutator_lock_);
- const ImpreciseConstType& PosShortConstant() SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ const ImpreciseConstType& ByteConstant() REQUIRES_SHARED(Locks::mutator_lock_);
+ const ImpreciseConstType& CharConstant() REQUIRES_SHARED(Locks::mutator_lock_);
+ const ImpreciseConstType& ShortConstant() REQUIRES_SHARED(Locks::mutator_lock_);
+ const ImpreciseConstType& IntConstant() REQUIRES_SHARED(Locks::mutator_lock_);
+ const ImpreciseConstType& PosByteConstant() REQUIRES_SHARED(Locks::mutator_lock_);
+ const ImpreciseConstType& PosShortConstant() REQUIRES_SHARED(Locks::mutator_lock_);
const RegType& GetComponentType(const RegType& array, mirror::ClassLoader* loader)
- SHARED_REQUIRES(Locks::mutator_lock_);
- void Dump(std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void Dump(std::ostream& os) REQUIRES_SHARED(Locks::mutator_lock_);
const RegType& RegTypeFromPrimitiveType(Primitive::Type) const;
void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void VisitStaticRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void FillPrimitiveAndSmallConstantTypes() SHARED_REQUIRES(Locks::mutator_lock_);
+ void FillPrimitiveAndSmallConstantTypes() REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Class* ResolveClass(const char* descriptor, mirror::ClassLoader* loader)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool MatchDescriptor(size_t idx, const StringPiece& descriptor, bool precise)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the passed in RegType.
template <class RegTypeType>
- RegTypeType& AddEntry(RegTypeType* new_entry) SHARED_REQUIRES(Locks::mutator_lock_);
+ RegTypeType& AddEntry(RegTypeType* new_entry) REQUIRES_SHARED(Locks::mutator_lock_);
// Add a string piece to the arena allocator so that it stays live for the lifetime of the
// verifier.
@@ -172,8 +172,8 @@
template <class Type>
static const Type* CreatePrimitiveTypeInstance(const std::string& descriptor)
- SHARED_REQUIRES(Locks::mutator_lock_);
- static void CreatePrimitiveAndSmallConstantTypes() SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ static void CreatePrimitiveAndSmallConstantTypes() REQUIRES_SHARED(Locks::mutator_lock_);
// A quick look up for popular small constants.
static constexpr int32_t kMinSmallConstant = -1;
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 56846c1..7603a79 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -67,25 +67,25 @@
// Implement category-1 "move" instructions. Copy a 32-bit value from "vsrc" to "vdst".
void CopyRegister1(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc, TypeCategory cat)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Implement category-2 "move" instructions. Copy a 64-bit value from "vsrc" to "vdst". This
// copies both halves of the register.
void CopyRegister2(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Implement "move-result". Copy the category-1 value from the result register to another
// register, and reset the result register.
void CopyResultRegister1(MethodVerifier* verifier, uint32_t vdst, bool is_reference)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Implement "move-result-wide". Copy the category-2 value from the result register to another
// register, and reset the result register.
void CopyResultRegister2(MethodVerifier* verifier, uint32_t vdst)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Set the invisible result register to unknown
- void SetResultTypeToUnknown(MethodVerifier* verifier) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetResultTypeToUnknown(MethodVerifier* verifier) REQUIRES_SHARED(Locks::mutator_lock_);
// Set the type of register N, verifying that the register is valid. If "newType" is the "Lo"
// part of a 64-bit value, register N+1 will be set to "newType+1".
@@ -102,20 +102,20 @@
ALWAYS_INLINE bool SetRegisterType(MethodVerifier* verifier,
uint32_t vdst,
const RegType& new_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool SetRegisterTypeWide(MethodVerifier* verifier,
uint32_t vdst,
const RegType& new_type1,
const RegType& new_type2)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/* Set the type of the "result" register. */
void SetResultRegisterType(MethodVerifier* verifier, const RegType& new_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetResultRegisterTypeWide(const RegType& new_type1, const RegType& new_type2)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get the type of register vsrc.
const RegType& GetRegisterType(MethodVerifier* verifier, uint32_t vsrc) const;
@@ -123,13 +123,13 @@
ALWAYS_INLINE bool VerifyRegisterType(MethodVerifier* verifier,
uint32_t vsrc,
const RegType& check_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool VerifyRegisterTypeWide(MethodVerifier* verifier,
uint32_t vsrc,
const RegType& check_type1,
const RegType& check_type2)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CopyFromLine(const RegisterLine* src) {
DCHECK_EQ(num_regs_, src->num_regs_);
@@ -139,7 +139,7 @@
this_initialized_ = src->this_initialized_;
}
- std::string Dump(MethodVerifier* verifier) const SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump(MethodVerifier* verifier) const REQUIRES_SHARED(Locks::mutator_lock_);
void FillWithGarbage() {
memset(&line_, 0xf1, num_regs_ * sizeof(uint16_t));
@@ -154,7 +154,7 @@
* the new ones at the same time).
*/
void MarkUninitRefsAsInvalid(MethodVerifier* verifier, const RegType& uninit_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Update all registers holding "uninit_type" to instead hold the corresponding initialized
@@ -162,7 +162,7 @@
* the reference must be marked as initialized.
*/
void MarkRefsAsInitialized(MethodVerifier* verifier, const RegType& uninit_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Update all registers to be Conflict except vsrc.
@@ -219,7 +219,7 @@
const Instruction* inst,
bool is_range,
bool allow_failure = false)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Verify types for a simple two-register instruction (e.g. "neg-int").
@@ -229,7 +229,7 @@
const Instruction* inst,
const RegType& dst_type,
const RegType& src_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CheckUnaryOpWide(MethodVerifier* verifier,
const Instruction* inst,
@@ -237,21 +237,21 @@
const RegType& dst_type2,
const RegType& src_type1,
const RegType& src_type2)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CheckUnaryOpToWide(MethodVerifier* verifier,
const Instruction* inst,
const RegType& dst_type1,
const RegType& dst_type2,
const RegType& src_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CheckUnaryOpFromWide(MethodVerifier* verifier,
const Instruction* inst,
const RegType& dst_type,
const RegType& src_type1,
const RegType& src_type2)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Verify types for a simple three-register instruction (e.g. "add-int").
@@ -264,7 +264,7 @@
const RegType& src_type1,
const RegType& src_type2,
bool check_boolean_op)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CheckBinaryOpWide(MethodVerifier* verifier,
const Instruction* inst,
@@ -274,14 +274,14 @@
const RegType& src_type1_2,
const RegType& src_type2_1,
const RegType& src_type2_2)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CheckBinaryOpWideShift(MethodVerifier* verifier,
const Instruction* inst,
const RegType& long_lo_type,
const RegType& long_hi_type,
const RegType& int_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Verify types for a binary "2addr" operation. "src_type1"/"src_type2"
@@ -293,7 +293,7 @@
const RegType& src_type1,
const RegType& src_type2,
bool check_boolean_op)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CheckBinaryOp2addrWide(MethodVerifier* verifier,
const Instruction* inst,
@@ -303,14 +303,14 @@
const RegType& src_type1_2,
const RegType& src_type2_1,
const RegType& src_type2_2)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CheckBinaryOp2addrWideShift(MethodVerifier* verifier,
const Instruction* inst,
const RegType& long_lo_type,
const RegType& long_hi_type,
const RegType& int_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Verify types for a two-register instruction with a literal constant (e.g. "add-int/lit8").
@@ -324,15 +324,15 @@
const RegType& src_type,
bool check_boolean_op,
bool is_lit16)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Verify/push monitor onto the monitor stack, locking the value in reg_idx at location insn_idx.
void PushMonitor(MethodVerifier* verifier, uint32_t reg_idx, int32_t insn_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Verify/pop monitor from monitor stack ensuring that we believe the monitor is locked
void PopMonitor(MethodVerifier* verifier, uint32_t reg_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Stack of currently held monitors and where they were locked
size_t MonitorStackDepth() const {
@@ -344,7 +344,7 @@
void VerifyMonitorStackEmpty(MethodVerifier* verifier) const;
bool MergeRegisters(MethodVerifier* verifier, const RegisterLine* incoming_line)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
size_t GetMonitorEnterCount() const {
return monitors_.size();
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index b8e05b8..25c9424 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -38,7 +38,7 @@
static jmethodID StringInitToStringFactoryMethodID(jmethodID string_init);
static mirror::Class* ToClass(jclass global_jclass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static jclass com_android_dex_Dex;
static jclass dalvik_annotation_optimization_FastNative;
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index 5304590..6c16100 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -34,10 +34,10 @@
} while (false);
struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
- explicit ReferenceMap2Visitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
+ explicit ReferenceMap2Visitor(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
: CheckReferenceMapVisitor(thread) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
if (CheckReferenceMapVisitor::VisitFrame()) {
return true;
}
diff --git a/test/004-StackWalk/stack_walk_jni.cc b/test/004-StackWalk/stack_walk_jni.cc
index 420224d..795f168 100644
--- a/test/004-StackWalk/stack_walk_jni.cc
+++ b/test/004-StackWalk/stack_walk_jni.cc
@@ -30,10 +30,10 @@
class TestReferenceMapVisitor : public CheckReferenceMapVisitor {
public:
- explicit TestReferenceMapVisitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
+ explicit TestReferenceMapVisitor(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
: CheckReferenceMapVisitor(thread) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
if (CheckReferenceMapVisitor::VisitFrame()) {
return true;
}
diff --git a/test/115-native-bridge/run b/test/115-native-bridge/run
index aeb5721..fb0b967 100644
--- a/test/115-native-bridge/run
+++ b/test/115-native-bridge/run
@@ -18,11 +18,11 @@
# Use libnativebridgetest as a native bridge, start NativeBridgeMain (Main is JniTest main file).
LIBPATH=$(echo ${ARGS} | sed -r 's/.*Djava.library.path=([^ ]*) .*/\1/')
-ln -s ${LIBPATH}/libnativebridgetest.so .
+ln -sf ${LIBPATH}/libnativebridgetest.so .
touch libarttest.so
touch libarttestd.so
-ln -s ${LIBPATH}/libarttest.so libarttest2.so
-ln -s ${LIBPATH}/libarttestd.so libarttestd2.so
+ln -sf ${LIBPATH}/libarttest.so libarttest2.so
+ln -sf ${LIBPATH}/libarttestd.so libarttestd2.so
# pwd likely has /, so it's a pain to put that into a sed rule.
LEFT=$(echo ${ARGS} | sed -r 's/-Djava.library.path.*//')
diff --git a/test/137-cfi/cfi.cc b/test/137-cfi/cfi.cc
index 45251b8..9f1499e 100644
--- a/test/137-cfi/cfi.cc
+++ b/test/137-cfi/cfi.cc
@@ -29,6 +29,7 @@
#include "base/logging.h"
#include "base/macros.h"
+#include "base/stringprintf.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "oat_file.h"
@@ -55,7 +56,7 @@
// Keep pausing.
printf("Going to sleep\n");
for (;;) {
- pause();
+ sleep(1);
}
}
@@ -86,6 +87,19 @@
return false;
}
+
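+// Print extra diagnostics about the secondary process: dump its memory maps and,
+// if requested, send it SIGQUIT to trigger an ANR-style stack dump.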
+static void MoreErrorInfo(pid_t pid, bool sig_quit_on_fail) {
+ printf("Secondary pid is %d\n", pid);
+
+ PrintFileToLog(StringPrintf("/proc/%d/maps", pid), ERROR);
+
+ if (sig_quit_on_fail) {
+ int res = kill(pid, SIGQUIT);
+ if (res != 0) {
+ PLOG(ERROR) << "Failed to send signal";
+ }
+ }
+}
#endif
// Currently we have to fall back to our own loader for the boot image when it's compiled PIC
@@ -254,10 +268,20 @@
result = CheckStack(bt.get(), full_signatrues ? full_seq : seq);
}
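+ // If unwinding failed, print extra information about the secondary process before detaching.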
+ constexpr bool kSigQuitOnFail = true;
+ if (!result) {
+ MoreErrorInfo(pid, kSigQuitOnFail);
+ }
+
if (ptrace(PTRACE_DETACH, pid, 0, 0) != 0) {
PLOG(ERROR) << "Detach failed";
}
+ // If we failed to unwind and induced an ANR dump, give the child some time (20s).
+ if (!result && kSigQuitOnFail) {
+ sleep(20);
+ }
+
// Kill the other process once we are done with it.
kill(pid, SIGKILL);
diff --git a/test/137-cfi/src/Main.java b/test/137-cfi/src/Main.java
index 5cfe33d..1ec7072 100644
--- a/test/137-cfi/src/Main.java
+++ b/test/137-cfi/src/Main.java
@@ -101,9 +101,10 @@
}
// Wait until the forked process had time to run until its sleep phase.
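+ // Declared outside the try block so the secondary's output can still be read if unwinding fails.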
+ BufferedReader lineReader;
try {
InputStreamReader stdout = new InputStreamReader(p.getInputStream(), "UTF-8");
- BufferedReader lineReader = new BufferedReader(stdout);
+ lineReader = new BufferedReader(stdout);
while (!lineReader.readLine().contains("Going to sleep")) {
}
} catch (Exception e) {
@@ -112,6 +113,26 @@
if (!unwindOtherProcess(fullSignatures, pid)) {
System.out.println("Unwinding other process failed.");
+
+ // In this case, log all the output.
+ // Note: this is potentially non-terminating code, if the secondary is totally stuck.
+ // We rely on the run-test timeout infrastructure to terminate the primary in
+ // such a case.
+ try {
+ String tmp;
+ System.out.println("Output from the secondary:");
+ while ((tmp = lineReader.readLine()) != null) {
+ System.out.println("Secondary: " + tmp);
+ }
+ } catch (Exception e) {
+ e.printStackTrace(System.out);
+ }
+ }
+
+ try {
+ lineReader.close();
+ } catch (Exception e) {
+ e.printStackTrace(System.out);
}
} finally {
// Kill the forked process if it is not already dead.
diff --git a/test/412-new-array/smali/fill_array_data.smali b/test/412-new-array/smali/fill_array_data.smali
index 2b24e56..f163084 100644
--- a/test/412-new-array/smali/fill_array_data.smali
+++ b/test/412-new-array/smali/fill_array_data.smali
@@ -2,6 +2,18 @@
.super Ljava/lang/Object;
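+# An empty array-data payload is legal: fill-array-data must still null-check the array but writes nothing.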
+.method public static emptyIntArray([I)V
+ .registers 1
+
+ fill-array-data v0, :ArrayData
+ return-void
+
+:ArrayData
+ .array-data 4
+ .end array-data
+
+.end method
+
.method public static intArray([I)V
.registers 1
diff --git a/test/412-new-array/src/Main.java b/test/412-new-array/src/Main.java
index d95d2c5..fb348ba 100644
--- a/test/412-new-array/src/Main.java
+++ b/test/412-new-array/src/Main.java
@@ -220,6 +220,38 @@
public static void testSmaliFillArrayData() throws Exception {
Class<?> c = Class.forName("FillArrayData");
{
+ Method m = c.getMethod("emptyIntArray", int[].class);
+ int[] array = new int[0];
+ Object[] args = { array };
+ m.invoke(null, args);
+ assertEquals(0, array.length);
+
+ array = new int[2];
+ args[0] = array;
+ m.invoke(null, args);
+ // Test that nothing has been written to the array.
+ assertEquals(0, array[0]);
+ assertEquals(0, array[1]);
+
+ array = new int[] { 42, -42 };
+ args[0] = array;
+ m.invoke(null, args);
+ // Test that nothing has been written to the array.
+ assertEquals(42, array[0]);
+ assertEquals(-42, array[1]);
+
+ Throwable exception = null;
+ args[0] = null;
+ try {
+ m.invoke(null, args);
+ } catch (InvocationTargetException e) {
+ exception = e.getCause();
+ assertTrue(exception instanceof NullPointerException);
+ }
+ assertNotNull(exception);
+ }
+
+ {
Method m = c.getMethod("intArray", int[].class);
int[] array = new int[7];
Object[] args = { array };
@@ -235,7 +267,7 @@
array = new int[2];
args[0] = array;
- Throwable exception = null;
+ Throwable exception = null;
try {
m.invoke(null, args);
} catch (InvocationTargetException e) {
@@ -274,7 +306,7 @@
array = new int[2];
args[0] = array;
- Throwable exception = null;
+ Throwable exception = null;
try {
m.invoke(null, args);
} catch (InvocationTargetException e) {
@@ -313,7 +345,7 @@
array = new short[2];
args[0] = array;
- Throwable exception = null;
+ Throwable exception = null;
try {
m.invoke(null, args);
} catch (InvocationTargetException e) {
@@ -352,7 +384,7 @@
array = new long[2];
args[0] = array;
- Throwable exception = null;
+ Throwable exception = null;
try {
m.invoke(null, args);
} catch (InvocationTargetException e) {
@@ -391,7 +423,7 @@
array = new char[2];
args[0] = array;
- Throwable exception = null;
+ Throwable exception = null;
try {
m.invoke(null, args);
} catch (InvocationTargetException e) {
@@ -430,7 +462,7 @@
array = new byte[2];
args[0] = array;
- Throwable exception = null;
+ Throwable exception = null;
try {
m.invoke(null, args);
} catch (InvocationTargetException e) {
@@ -467,7 +499,7 @@
array = new boolean[2];
args[0] = array;
- Throwable exception = null;
+ Throwable exception = null;
try {
m.invoke(null, args);
} catch (InvocationTargetException e) {
diff --git a/test/442-checker-constant-folding/src/Main.java b/test/442-checker-constant-folding/src/Main.java
index 33ef10b..64180d5 100644
--- a/test/442-checker-constant-folding/src/Main.java
+++ b/test/442-checker-constant-folding/src/Main.java
@@ -27,6 +27,12 @@
}
}
+ public static void assertTrue(boolean condition) {
+ if (!condition) {
+ throw new Error();
+ }
+ }
+
public static void assertIntEquals(int expected, int result) {
if (expected != result) {
throw new Error("Expected: " + expected + ", found: " + result);
@@ -1322,6 +1328,58 @@
/**
+ * Test optimizations of comparisons with null yielding a constant result.
+ */
+
+ /// CHECK-START: boolean Main.ConstStringEqualsNull() constant_folding$after_inlining (before)
+ /// CHECK-DAG: <<ConstStr:l\d+>> LoadString
+ /// CHECK-DAG: <<Null:l\d+>> NullConstant
+ /// CHECK-DAG: <<Eq:z\d+>> Equal [<<ConstStr>>,<<Null>>]
+ /// CHECK-DAG: If [<<Eq>>]
+
+ /// CHECK-START: boolean Main.ConstStringEqualsNull() constant_folding$after_inlining (after)
+ /// CHECK-DAG: <<False:i\d+>> IntConstant 0
+ /// CHECK-DAG: If [<<False>>]
+
+ /// CHECK-START: boolean Main.ConstStringEqualsNull() constant_folding$after_inlining (after)
+ /// CHECK-NOT: Equal
+
+ public static boolean ConstStringEqualsNull() {
+ // Due to Jack emitting code using the opposite condition, use != to generate Equal.
+ if ($inline$ConstString() != null) {
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ /// CHECK-START: boolean Main.ConstStringNotEqualsNull() constant_folding$after_inlining (before)
+ /// CHECK-DAG: <<ConstStr:l\d+>> LoadString
+ /// CHECK-DAG: <<Null:l\d+>> NullConstant
+ /// CHECK-DAG: <<Ne:z\d+>> NotEqual [<<ConstStr>>,<<Null>>]
+ /// CHECK-DAG: If [<<Ne>>]
+
+ /// CHECK-START: boolean Main.ConstStringNotEqualsNull() constant_folding$after_inlining (after)
+ /// CHECK-DAG: <<True:i\d+>> IntConstant 1
+ /// CHECK-DAG: If [<<True>>]
+
+ /// CHECK-START: boolean Main.ConstStringNotEqualsNull() constant_folding$after_inlining (after)
+ /// CHECK-NOT: NotEqual
+
+ public static boolean ConstStringNotEqualsNull() {
+ // Due to Jack emitting code using the opposite condition, use == to generate NotEqual.
+ if ($inline$ConstString() == null) {
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ public static String $inline$ConstString() {
+ return "";
+ }
+
+ /**
* Exercise constant folding on type conversions.
*/
@@ -1601,6 +1659,9 @@
assertFalse(CmpFloatGreaterThanNaN(arbitrary));
assertFalse(CmpDoubleLessThanNaN(arbitrary));
+ assertFalse(ConstStringEqualsNull());
+ assertTrue(ConstStringNotEqualsNull());
+
Main main = new Main();
assertIntEquals(1, main.smaliCmpLongConstants());
assertIntEquals(-1, main.smaliCmpGtFloatConstants());
diff --git a/test/454-get-vreg/get_vreg_jni.cc b/test/454-get-vreg/get_vreg_jni.cc
index 30f9954..5762754 100644
--- a/test/454-get-vreg/get_vreg_jni.cc
+++ b/test/454-get-vreg/get_vreg_jni.cc
@@ -29,12 +29,12 @@
class TestVisitor : public StackVisitor {
public:
TestVisitor(Thread* thread, Context* context, mirror::Object* this_value)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
this_value_(this_value),
found_method_index_(0) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
diff --git a/test/457-regs/regs_jni.cc b/test/457-regs/regs_jni.cc
index 79fa8b0..08db775 100644
--- a/test/457-regs/regs_jni.cc
+++ b/test/457-regs/regs_jni.cc
@@ -29,10 +29,10 @@
class TestVisitor : public StackVisitor {
public:
TestVisitor(Thread* thread, Context* context)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
diff --git a/test/461-get-reference-vreg/get_reference_vreg_jni.cc b/test/461-get-reference-vreg/get_reference_vreg_jni.cc
index 8108c97..8122c6d 100644
--- a/test/461-get-reference-vreg/get_reference_vreg_jni.cc
+++ b/test/461-get-reference-vreg/get_reference_vreg_jni.cc
@@ -28,12 +28,12 @@
class TestVisitor : public StackVisitor {
public:
TestVisitor(Thread* thread, Context* context, mirror::Object* this_value)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
this_value_(this_value),
found_method_index_(0) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
diff --git a/test/466-get-live-vreg/get_live_vreg_jni.cc b/test/466-get-live-vreg/get_live_vreg_jni.cc
index 4f89e91..3618b4f 100644
--- a/test/466-get-live-vreg/get_live_vreg_jni.cc
+++ b/test/466-get-live-vreg/get_live_vreg_jni.cc
@@ -28,10 +28,10 @@
class TestVisitor : public StackVisitor {
public:
- TestVisitor(Thread* thread, Context* context) SHARED_REQUIRES(Locks::mutator_lock_)
+ TestVisitor(Thread* thread, Context* context) REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
diff --git a/test/534-checker-bce-deoptimization/expected.txt b/test/534-checker-bce-deoptimization/expected.txt
index 3823a29..b9a1e27 100644
--- a/test/534-checker-bce-deoptimization/expected.txt
+++ b/test/534-checker-bce-deoptimization/expected.txt
@@ -1 +1,5 @@
+array[0]=2.5f
+array[1]=2.625f
+array[0]=3.5
+array[1]=3.625
finish
diff --git a/test/534-checker-bce-deoptimization/src/Main.java b/test/534-checker-bce-deoptimization/src/Main.java
index 8cd20f6..c4e4cbf 100644
--- a/test/534-checker-bce-deoptimization/src/Main.java
+++ b/test/534-checker-bce-deoptimization/src/Main.java
@@ -17,6 +17,8 @@
public class Main {
public static void main(String[] args) {
new Main().run();
+ testPreserveFloat();
+ testPreserveDouble();
System.out.println("finish");
}
@@ -53,5 +55,77 @@
b[i + 1] += c * b[i + 1];
}
}
+
+ /*
+ * Test that we correctly preserve floating point registers when we deoptimize.
+ *
+ * Note: These tests rely on the deoptimization happening before the loop,
+ * so that the loop is interpreted and fills the provided arrays. However,
+   * the BCE transformation could be changed to run the loop for as many
+   * iterations as the compiler can prove free of AIOOBE and to deoptimize only
+   * afterwards, just before the throwing iteration. In that case the floating
+   * point registers would no longer be used after the deoptimization, and a
+   * different approach would be needed to test this.
+ */
+
+ static public void testPreserveFloat() {
+ float[] array = new float[2];
+ try {
+ $noinline$FloatFill(1.125f, 2.5f, array, 3);
+ throw new Error();
+ } catch (ArrayIndexOutOfBoundsException expected) {
+ System.out.println("array[0]=" + array[0] + "f");
+ System.out.println("array[1]=" + array[1] + "f");
+ }
+ }
+
+ /// CHECK-START: void Main.$noinline$FloatFill(float, float, float[], int) BCE (after)
+ /// CHECK-DAG: Deoptimize
+ /// CHECK-DAG: Deoptimize
+ /// CHECK-DAG: Deoptimize
+ /// CHECK-NOT: Deoptimize
+
+ /// CHECK-START: void Main.$noinline$FloatFill(float, float, float[], int) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+
+ public static void $noinline$FloatFill(float f1, float f2, float[] array, int n) {
+ if (doThrow) { throw new Error(); }
+ for (int i = 0; i < n; ++i) {
+ array[i] = ((i & 1) == 1) ? f1 : f2;
+ f1 += 1.5f;
+ f2 += 2.25f;
+ }
+ }
+
+ static public void testPreserveDouble() {
+ double[] array = new double[2];
+ try {
+ $noinline$DoubleFill(2.125, 3.5, array, 3);
+ throw new Error();
+ } catch (ArrayIndexOutOfBoundsException expected) {
+ System.out.println("array[0]=" + array[0]);
+ System.out.println("array[1]=" + array[1]);
+ }
+ }
+
+ /// CHECK-START: void Main.$noinline$DoubleFill(double, double, double[], int) BCE (after)
+ /// CHECK-DAG: Deoptimize
+ /// CHECK-DAG: Deoptimize
+ /// CHECK-DAG: Deoptimize
+ /// CHECK-NOT: Deoptimize
+
+ /// CHECK-START: void Main.$noinline$DoubleFill(double, double, double[], int) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+
+ public static void $noinline$DoubleFill(double d1, double d2, double[] array, int n) {
+ if (doThrow) { throw new Error(); }
+ for (int i = 0; i < n; ++i) {
+ array[i] = ((i & 1) == 1) ? d1 : d2;
+ d1 += 1.5;
+ d2 += 2.25;
+ }
+ }
+
+ public static boolean doThrow = false;
}
diff --git a/test/538-checker-embed-constants/src/Main.java b/test/538-checker-embed-constants/src/Main.java
index f6713a2..04a12fa 100644
--- a/test/538-checker-embed-constants/src/Main.java
+++ b/test/538-checker-embed-constants/src/Main.java
@@ -102,12 +102,12 @@
/// CHECK-START-ARM: long Main.and255(long) disassembly (after)
/// CHECK-NOT: movs {{r\d+}}, #255
- /// CHECK-NOT: and
- /// CHECK-NOT: bic
+ /// CHECK-NOT: and{{(\.w)?}}
+ /// CHECK-NOT: bic{{(\.w)?}}
/// CHECK-DAG: and {{r\d+}}, {{r\d+}}, #255
/// CHECK-DAG: movs {{r\d+}}, #0
- /// CHECK-NOT: and
- /// CHECK-NOT: bic
+ /// CHECK-NOT: and{{(\.w)?}}
+ /// CHECK-NOT: bic{{(\.w)?}}
public static long and255(long arg) {
return arg & 255L;
@@ -115,12 +115,13 @@
/// CHECK-START-ARM: long Main.and511(long) disassembly (after)
/// CHECK: movw {{r\d+}}, #511
- /// CHECK-NOT: and
- /// CHECK-NOT: bic
- /// CHECK-DAG: and{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
- /// CHECK-DAG: movs {{r\d+}}, #0
- /// CHECK-NOT: and
- /// CHECK-NOT: bic
+ /// CHECK-NEXT: movs {{r\d+}}, #0
+ /// CHECK-NOT: and{{(\.w)?}}
+ /// CHECK-NOT: bic{{(\.w)?}}
+ /// CHECK: and{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+ /// CHECK-NEXT: and{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+ /// CHECK-NOT: and{{(\.w)?}}
+ /// CHECK-NOT: bic{{(\.w)?}}
public static long and511(long arg) {
return arg & 511L;
@@ -128,11 +129,11 @@
/// CHECK-START-ARM: long Main.andNot15(long) disassembly (after)
/// CHECK-NOT: mvn {{r\d+}}, #15
- /// CHECK-NOT: and
- /// CHECK-NOT: bic
+ /// CHECK-NOT: and{{(\.w)?}}
+ /// CHECK-NOT: bic{{(\.w)?}}
/// CHECK: bic {{r\d+}}, {{r\d+}}, #15
- /// CHECK-NOT: and
- /// CHECK-NOT: bic
+ /// CHECK-NOT: and{{(\.w)?}}
+ /// CHECK-NOT: bic{{(\.w)?}}
public static long andNot15(long arg) {
return arg & ~15L;
@@ -141,12 +142,12 @@
/// CHECK-START-ARM: long Main.and0xfffffff00000000f(long) disassembly (after)
/// CHECK-NOT: movs {{r\d+}}, #15
/// CHECK-NOT: mvn {{r\d+}}, #15
- /// CHECK-NOT: and
- /// CHECK-NOT: bic
+ /// CHECK-NOT: and{{(\.w)?}}
+ /// CHECK-NOT: bic{{(\.w)?}}
/// CHECK-DAG: and {{r\d+}}, {{r\d+}}, #15
/// CHECK-DAG: bic {{r\d+}}, {{r\d+}}, #15
- /// CHECK-NOT: and
- /// CHECK-NOT: bic
+ /// CHECK-NOT: and{{(\.w)?}}
+ /// CHECK-NOT: bic{{(\.w)?}}
public static long and0xfffffff00000000f(long arg) {
return arg & 0xfffffff00000000fL;
@@ -154,10 +155,10 @@
/// CHECK-START-ARM: long Main.or255(long) disassembly (after)
/// CHECK-NOT: movs {{r\d+}}, #255
- /// CHECK-NOT: orr
+ /// CHECK-NOT: orr{{(\.w)?}}
/// CHECK-NOT: orn
/// CHECK: orr {{r\d+}}, {{r\d+}}, #255
- /// CHECK-NOT: orr
+ /// CHECK-NOT: orr{{(\.w)?}}
/// CHECK-NOT: orn
public static long or255(long arg) {
@@ -166,10 +167,12 @@
/// CHECK-START-ARM: long Main.or511(long) disassembly (after)
/// CHECK: movw {{r\d+}}, #511
- /// CHECK-NOT: orr
+ /// CHECK-NEXT: movs {{r\d+}}, #0
+ /// CHECK-NOT: orr{{(\.w)?}}
/// CHECK-NOT: orn
/// CHECK: orr{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
- /// CHECK-NOT: orr
+ /// CHECK-NEXT: orr{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+ /// CHECK-NOT: orr{{(\.w)?}}
/// CHECK-NOT: orn
public static long or511(long arg) {
@@ -178,11 +181,11 @@
/// CHECK-START-ARM: long Main.orNot15(long) disassembly (after)
/// CHECK-NOT: mvn {{r\d+}}, #15
- /// CHECK-NOT: orr
+ /// CHECK-NOT: orr{{(\.w)?}}
/// CHECK-NOT: orn
/// CHECK-DAG: orn {{r\d+}}, {{r\d+}}, #15
/// CHECK-DAG: mvn {{r\d+}}, #0
- /// CHECK-NOT: orr
+ /// CHECK-NOT: orr{{(\.w)?}}
/// CHECK-NOT: orn
public static long orNot15(long arg) {
@@ -192,11 +195,11 @@
/// CHECK-START-ARM: long Main.or0xfffffff00000000f(long) disassembly (after)
/// CHECK-NOT: movs {{r\d+}}, #15
/// CHECK-NOT: mvn {{r\d+}}, #15
- /// CHECK-NOT: orr
+ /// CHECK-NOT: orr{{(\.w)?}}
/// CHECK-NOT: orn
/// CHECK-DAG: orr {{r\d+}}, {{r\d+}}, #15
/// CHECK-DAG: orn {{r\d+}}, {{r\d+}}, #15
- /// CHECK-NOT: orr
+ /// CHECK-NOT: orr{{(\.w)?}}
/// CHECK-NOT: orn
public static long or0xfffffff00000000f(long arg) {
@@ -205,9 +208,9 @@
/// CHECK-START-ARM: long Main.xor255(long) disassembly (after)
/// CHECK-NOT: movs {{r\d+}}, #255
- /// CHECK-NOT: eor
+ /// CHECK-NOT: eor{{(\.w)?}}
/// CHECK: eor {{r\d+}}, {{r\d+}}, #255
- /// CHECK-NOT: eor
+ /// CHECK-NOT: eor{{(\.w)?}}
public static long xor255(long arg) {
return arg ^ 255L;
@@ -215,9 +218,11 @@
/// CHECK-START-ARM: long Main.xor511(long) disassembly (after)
/// CHECK: movw {{r\d+}}, #511
- /// CHECK-NOT: eor
- /// CHECK-DAG: eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
- /// CHECK-NOT: eor
+ /// CHECK-NEXT: movs {{r\d+}}, #0
+ /// CHECK-NOT: eor{{(\.w)?}}
+ /// CHECK: eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+ /// CHECK-NEXT: eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+ /// CHECK-NOT: eor{{(\.w)?}}
public static long xor511(long arg) {
return arg ^ 511L;
@@ -226,10 +231,10 @@
/// CHECK-START-ARM: long Main.xorNot15(long) disassembly (after)
/// CHECK-DAG: mvn {{r\d+}}, #15
/// CHECK-DAG: mov.w {{r\d+}}, #-1
- /// CHECK-NOT: eor
+ /// CHECK-NOT: eor{{(\.w)?}}
/// CHECK-DAG: eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
/// CHECK-DAG: eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
- /// CHECK-NOT: eor
+ /// CHECK-NOT: eor{{(\.w)?}}
public static long xorNot15(long arg) {
return arg ^ ~15L;
@@ -239,10 +244,10 @@
/// CHECK-START-ARM: long Main.xor0xfffffff00000000f(long) disassembly (after)
/// CHECK-DAG: movs {{r\d+}}, #15
/// CHECK-DAG: mvn {{r\d+}}, #15
- /// CHECK-NOT: eor
+ /// CHECK-NOT: eor{{(\.w)?}}
/// CHECK-DAG: eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
/// CHECK-DAG: eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
- /// CHECK-NOT: eor
+ /// CHECK-NOT: eor{{(\.w)?}}
public static long xor0xfffffff00000000f(long arg) {
return arg ^ 0xfffffff00000000fL;
@@ -251,10 +256,10 @@
/// CHECK-START-ARM: long Main.xor0xf00000000000000f(long) disassembly (after)
/// CHECK-NOT: movs {{r\d+}}, #15
/// CHECK-NOT: mov.w {{r\d+}}, #-268435456
- /// CHECK-NOT: eor
+ /// CHECK-NOT: eor{{(\.w)?}}
/// CHECK-DAG: eor {{r\d+}}, {{r\d+}}, #15
/// CHECK-DAG: eor {{r\d+}}, {{r\d+}}, #-268435456
- /// CHECK-NOT: eor
+ /// CHECK-NOT: eor{{(\.w)?}}
public static long xor0xf00000000000000f(long arg) {
return arg ^ 0xf00000000000000fL;
diff --git a/test/543-env-long-ref/env_long_ref.cc b/test/543-env-long-ref/env_long_ref.cc
index 4108323..557def6 100644
--- a/test/543-env-long-ref/env_long_ref.cc
+++ b/test/543-env-long-ref/env_long_ref.cc
@@ -28,13 +28,13 @@
class TestVisitor : public StackVisitor {
public:
TestVisitor(const ScopedObjectAccess& soa, Context* context, jobject expected_value)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(soa.Self(), context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
expected_value_(expected_value),
found_(false),
soa_(soa) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
diff --git a/test/563-checker-fakestring/smali/TestCase.smali b/test/563-checker-fakestring/smali/TestCase.smali
index 54312a4..9f86352 100644
--- a/test/563-checker-fakestring/smali/TestCase.smali
+++ b/test/563-checker-fakestring/smali/TestCase.smali
@@ -42,16 +42,19 @@
# Test usage of String new-instance before it is initialized.
## CHECK-START: void TestCase.compareNewInstance() register (after)
-## CHECK-DAG: <<Null:l\d+>> NullConstant
+## CHECK-DAG: <<Null:l\d+>> InvokeStaticOrDirect method_name:Main.$noinline$HiddenNull
## CHECK-DAG: <<String:l\d+>> NewInstance
-## CHECK-DAG: <<Cond:z\d+>> NotEqual [<<String>>,<<Null>>]
+## CHECK-DAG: <<Cond:z\d+>> NotEqual [<<Null>>,<<String>>]
## CHECK-DAG: If [<<Cond>>]
.method public static compareNewInstance()V
.registers 3
+ invoke-static {}, LMain;->$noinline$HiddenNull()Ljava/lang/Object;
+ move-result-object v1
+
new-instance v0, Ljava/lang/String;
- if-nez v0, :return
+ if-ne v0, v1, :return
# Will throw NullPointerException if this branch is taken.
const v1, 0x0
diff --git a/test/563-checker-fakestring/src/Main.java b/test/563-checker-fakestring/src/Main.java
index 1ac8a5b..78cb37a 100644
--- a/test/563-checker-fakestring/src/Main.java
+++ b/test/563-checker-fakestring/src/Main.java
@@ -79,4 +79,11 @@
assertEqual(testString, result);
}
}
+
+ public static boolean doThrow = false;
+
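+  // Returns null through a call the compiler does not inline (standard
+  // $noinline$/doThrow test pattern), so the null stays opaque and the
+  // comparison in TestCase.compareNewInstance() is not folded away before the
+  // register allocation pass checked by the smali test.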
+ public static Object $noinline$HiddenNull() {
+ if (doThrow) { throw new Error(); }
+ return null;
+ }
}
diff --git a/test/570-checker-osr/osr.cc b/test/570-checker-osr/osr.cc
index cf413ba..adda3cc 100644
--- a/test/570-checker-osr/osr.cc
+++ b/test/570-checker-osr/osr.cc
@@ -28,13 +28,13 @@
class OsrVisitor : public StackVisitor {
public:
explicit OsrVisitor(Thread* thread, const char* method_name)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
method_name_(method_name),
in_osr_method_(false),
in_interpreter_(false) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
@@ -90,11 +90,11 @@
class ProfilingInfoVisitor : public StackVisitor {
public:
explicit ProfilingInfoVisitor(Thread* thread, const char* method_name)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
method_name_(method_name) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
@@ -124,11 +124,11 @@
class OsrCheckVisitor : public StackVisitor {
public:
OsrCheckVisitor(Thread* thread, const char* method_name)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
method_name_(method_name) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
diff --git a/test/595-profile-saving/profile-saving.cc b/test/595-profile-saving/profile-saving.cc
index 0d26f45..a265dce 100644
--- a/test/595-profile-saving/profile-saving.cc
+++ b/test/595-profile-saving/profile-saving.cc
@@ -34,11 +34,11 @@
class CreateProfilingInfoVisitor : public StackVisitor {
public:
explicit CreateProfilingInfoVisitor(Thread* thread, const char* method_name)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
method_name_(method_name) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
diff --git a/test/901-hello-ti-agent/basics.cc b/test/901-hello-ti-agent/basics.cc
index 81a1b66..3a475c6 100644
--- a/test/901-hello-ti-agent/basics.cc
+++ b/test/901-hello-ti-agent/basics.cc
@@ -35,7 +35,7 @@
#define CHECK_CALL_SUCCESS(c) \
do { \
- if (c != JNI_OK) { \
+ if ((c) != JNI_OK) { \
printf("call " #c " did not succeed\n"); \
return -1; \
} \
diff --git a/test/Transaction/Transaction.java b/test/Transaction/Transaction.java
index 00e1fbb..e7085c1 100644
--- a/test/Transaction/Transaction.java
+++ b/test/Transaction/Transaction.java
@@ -18,6 +18,10 @@
static class EmptyStatic {
}
+ static class ResolveString {
+ static String s = "ResolvedString";
+ }
+
static class StaticFieldClass {
public static int intField;
static {
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index ee2ee1a..fd1ba02 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -22,12 +22,10 @@
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "mirror/class-inl.h"
-#include "nth_caller_visitor.h"
#include "oat_quick_method_header.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "ScopedUtfChars.h"
-#include "stack.h"
#include "thread-inl.h"
namespace art {
diff --git a/test/valgrind-suppressions.txt b/test/valgrind-suppressions.txt
index acab6e5..fd3c331 100644
--- a/test/valgrind-suppressions.txt
+++ b/test/valgrind-suppressions.txt
@@ -13,3 +13,12 @@
fun:_dl_start
obj:/lib/x86_64-linux-gnu/ld-2.19.so
}
+
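+# Known definite leak reached via art::Runtime::InitNativeMethods(); tracked as b/31275764.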
+{
+ b/31275764
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ ...
+ fun:_ZN3art7Runtime17InitNativeMethodsEv
+}
diff --git a/test/valgrind-target-suppressions.txt b/test/valgrind-target-suppressions.txt
index 7ae6d53..fbc99b1 100644
--- a/test/valgrind-target-suppressions.txt
+++ b/test/valgrind-target-suppressions.txt
@@ -50,3 +50,12 @@
fun:malloc
fun:setenv
}
+
+{
+ b/31275764
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ ...
+ fun:_ZN3art7Runtime17InitNativeMethodsEv
+}
diff --git a/tools/art b/tools/art
index d91b451..1a3bba7 100644
--- a/tools/art
+++ b/tools/art
@@ -87,7 +87,7 @@
fi
if [ z"$PERF" != z ]; then
- invoke_with="perf record -o $ANDROID_DATA/perf.data -e cycles:u $invoke_with"
+ invoke_with="perf record -g -o $ANDROID_DATA/perf.data -e cycles:u $invoke_with"
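+  # The -g flag records call graphs so perf report can show the calling context of samples.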
DEBUG_OPTION="-Xcompiler-option --generate-debug-info"
fi
diff --git a/tools/bisection-search/README.md b/tools/bisection-search/README.md
deleted file mode 100644
index 857c930..0000000
--- a/tools/bisection-search/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-Bisection Bug Search
-====================
-
-Bisection Bug Search is a tool for finding compiler optimizations bugs. It
-accepts a program which exposes a bug by producing incorrect output and expected
-output for the program. It then attempts to narrow down the issue to a single
-method and optimization pass under the assumption that interpreter is correct.
-
-Given methods in order M0..Mn finds smallest i such that compiling Mi and
-interpreting all other methods produces incorrect output. Then, given ordered
-optimization passes P0..Pl, finds smallest j such that compiling Mi with passes
-P0..Pj-1 produces expected output and compiling Mi with passes P0..Pj produces
-incorrect output. Prints Mi and Pj.
-
-How to run Bisection Bug Search
-===============================
-
- bisection_search.py [-h] -cp CLASSPATH
- [--expected-output EXPECTED_OUTPUT] [--device]
- [--lib LIB] [--64]
- [--dalvikvm-option [OPTION [OPTION ...]]]
- [--arg [TEST_ARGS [TEST_ARGS ...]]] [--image IMAGE]
- [--verbose]
- classname
-
- positional arguments:
- classname name of class to run
-
- optional arguments:
- -h, --help show this help message and exit
- -cp CLASSPATH, --classpath CLASSPATH
- classpath
- --expected-output EXPECTED_OUTPUT
- file containing expected output
- --device run on device
- --lib LIB lib to use, default: libart.so
- --64 x64 mode
- --dalvikvm-option [OPTION [OPTION ...]]
- additional dalvikvm option
- --arg [TEST_ARGS [TEST_ARGS ...]]
- argument to pass to program
- --image IMAGE path to image
- --verbose enable verbose output
diff --git a/tools/bisection-search/bisection_search.py b/tools/bisection-search/bisection_search.py
deleted file mode 100755
index d6c1749..0000000
--- a/tools/bisection-search/bisection_search.py
+++ /dev/null
@@ -1,300 +0,0 @@
-#!/usr/bin/env python3.4
-#
-# Copyright (C) 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Performs bisection bug search on methods and optimizations.
-
-See README.md.
-
-Example usage:
-./bisection-search.py -cp classes.dex --expected-output output Test
-"""
-
-import argparse
-import re
-import sys
-
-from common import DeviceTestEnv
-from common import FatalError
-from common import GetEnvVariableOrError
-from common import HostTestEnv
-
-# Passes that are never disabled during search process because disabling them
-# would compromise correctness.
-MANDATORY_PASSES = ['dex_cache_array_fixups_arm',
- 'dex_cache_array_fixups_mips',
- 'instruction_simplifier$before_codegen',
- 'pc_relative_fixups_x86',
- 'pc_relative_fixups_mips',
- 'x86_memory_operand_generation']
-
-# Passes that show up as optimizations in compiler verbose output but aren't
-# driven by run-passes mechanism. They are mandatory and will always run, we
-# never pass them to --run-passes.
-NON_PASSES = ['builder', 'prepare_for_register_allocation',
- 'liveness', 'register']
-
-
-class Dex2OatWrapperTestable(object):
- """Class representing a testable compilation.
-
- Accepts filters on compiled methods and optimization passes.
- """
-
- def __init__(self, base_cmd, test_env, class_name, args,
- expected_output=None, verbose=False):
- """Constructor.
-
- Args:
- base_cmd: list of strings, base command to run.
- test_env: ITestEnv.
- class_name: string, name of class to run.
- args: list of strings, program arguments to pass.
- expected_output: string, expected output to compare against or None.
- verbose: bool, enable verbose output.
- """
- self._base_cmd = base_cmd
- self._test_env = test_env
- self._class_name = class_name
- self._args = args
- self._expected_output = expected_output
- self._compiled_methods_path = self._test_env.CreateFile('compiled_methods')
- self._passes_to_run_path = self._test_env.CreateFile('run_passes')
- self._verbose = verbose
-
- def Test(self, compiled_methods, passes_to_run=None):
- """Tests compilation with compiled_methods and run_passes switches active.
-
- If compiled_methods is None then compiles all methods.
- If passes_to_run is None then runs default passes.
-
- Args:
- compiled_methods: list of strings representing methods to compile or None.
- passes_to_run: list of strings representing passes to run or None.
-
- Returns:
- True if test passes with given settings. False otherwise.
- """
- if self._verbose:
- print('Testing methods: {0} passes:{1}.'.format(
- compiled_methods, passes_to_run))
- cmd = self._PrepareCmd(compiled_methods=compiled_methods,
- passes_to_run=passes_to_run,
- verbose_compiler=True)
- (output, _, ret_code) = self._test_env.RunCommand(cmd)
- res = ret_code == 0 and (self._expected_output is None
- or output == self._expected_output)
- if self._verbose:
- print('Test passed: {0}.'.format(res))
- return res
-
- def GetAllMethods(self):
- """Get methods compiled during the test.
-
- Returns:
- List of strings representing methods compiled during the test.
-
- Raises:
- FatalError: An error occurred when retrieving methods list.
- """
- cmd = self._PrepareCmd(verbose_compiler=True)
- (_, err_output, _) = self._test_env.RunCommand(cmd)
- match_methods = re.findall(r'Building ([^\n]+)\n', err_output)
- if not match_methods:
- raise FatalError('Failed to retrieve methods list. '
- 'Not recognized output format.')
- return match_methods
-
- def GetAllPassesForMethod(self, compiled_method):
- """Get all optimization passes ran for a method during the test.
-
- Args:
- compiled_method: string representing method to compile.
-
- Returns:
- List of strings representing passes ran for compiled_method during test.
-
- Raises:
- FatalError: An error occurred when retrieving passes list.
- """
- cmd = self._PrepareCmd(compiled_methods=[compiled_method],
- verbose_compiler=True)
- (_, err_output, _) = self._test_env.RunCommand(cmd)
- match_passes = re.findall(r'Starting pass: ([^\n]+)\n', err_output)
- if not match_passes:
- raise FatalError('Failed to retrieve passes list. '
- 'Not recognized output format.')
- return [p for p in match_passes if p not in NON_PASSES]
-
- def _PrepareCmd(self, compiled_methods=None, passes_to_run=None,
- verbose_compiler=False):
- """Prepare command to run."""
- cmd = list(self._base_cmd)
- if compiled_methods is not None:
- self._test_env.WriteLines(self._compiled_methods_path, compiled_methods)
- cmd += ['-Xcompiler-option', '--compiled-methods={0}'.format(
- self._compiled_methods_path)]
- if passes_to_run is not None:
- self._test_env.WriteLines(self._passes_to_run_path, passes_to_run)
- cmd += ['-Xcompiler-option', '--run-passes={0}'.format(
- self._passes_to_run_path)]
- if verbose_compiler:
- cmd += ['-Xcompiler-option', '--runtime-arg', '-Xcompiler-option',
- '-verbose:compiler']
- cmd += ['-classpath', self._test_env.classpath, self._class_name]
- cmd += self._args
- return cmd
-
-
-def BinarySearch(start, end, test):
- """Binary search integers using test function to guide the process."""
- while start < end:
- mid = (start + end) // 2
- if test(mid):
- start = mid + 1
- else:
- end = mid
- return start
-
-
-def FilterPasses(passes, cutoff_idx):
- """Filters passes list according to cutoff_idx but keeps mandatory passes."""
- return [opt_pass for idx, opt_pass in enumerate(passes)
- if opt_pass in MANDATORY_PASSES or idx < cutoff_idx]
-
-
-def BugSearch(testable):
- """Find buggy (method, optimization pass) pair for a given testable.
-
- Args:
- testable: Dex2OatWrapperTestable.
-
- Returns:
- (string, string) tuple. First element is name of method which when compiled
- exposes test failure. Second element is name of optimization pass such that
- for aforementioned method running all passes up to and excluding the pass
- results in test passing but running all passes up to and including the pass
- results in test failing.
-
- (None, None) if test passes when compiling all methods.
- (string, None) if a method is found which exposes the failure, but the
- failure happens even when running just mandatory passes.
-
- Raises:
- FatalError: Testable fails with no methods compiled.
- AssertionError: Method failed for all passes when bisecting methods, but
- passed when bisecting passes. Possible sporadic failure.
- """
- all_methods = testable.GetAllMethods()
- faulty_method_idx = BinarySearch(
- 0,
- len(all_methods),
- lambda mid: testable.Test(all_methods[0:mid]))
- if faulty_method_idx == len(all_methods):
- return (None, None)
- if faulty_method_idx == 0:
- raise FatalError('Testable fails with no methods compiled. '
- 'Perhaps issue lies outside of compiler.')
- faulty_method = all_methods[faulty_method_idx - 1]
- all_passes = testable.GetAllPassesForMethod(faulty_method)
- faulty_pass_idx = BinarySearch(
- 0,
- len(all_passes),
- lambda mid: testable.Test([faulty_method],
- FilterPasses(all_passes, mid)))
- if faulty_pass_idx == 0:
- return (faulty_method, None)
- assert faulty_pass_idx != len(all_passes), 'Method must fail for some passes.'
- faulty_pass = all_passes[faulty_pass_idx - 1]
- return (faulty_method, faulty_pass)
-
-
-def PrepareParser():
- """Prepares argument parser."""
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '-cp', '--classpath', required=True, type=str, help='classpath')
- parser.add_argument('--expected-output', type=str,
- help='file containing expected output')
- parser.add_argument(
- '--device', action='store_true', default=False, help='run on device')
- parser.add_argument('classname', type=str, help='name of class to run')
- parser.add_argument('--lib', dest='lib', type=str, default='libart.so',
- help='lib to use, default: libart.so')
- parser.add_argument('--64', dest='x64', action='store_true',
- default=False, help='x64 mode')
- parser.add_argument('--dalvikvm-option', dest='dalvikvm_opts',
- metavar='OPTION', nargs='*', default=[],
- help='additional dalvikvm option')
- parser.add_argument('--arg', dest='test_args', nargs='*', default=[],
- help='argument to pass to program')
- parser.add_argument('--image', type=str, help='path to image')
- parser.add_argument('--verbose', action='store_true',
- default=False, help='enable verbose output')
- return parser
-
-
-def main():
- # Parse arguments
- parser = PrepareParser()
- args = parser.parse_args()
-
- # Prepare environment
- if args.expected_output is not None:
- with open(args.expected_output, 'r') as f:
- expected_output = f.read()
- else:
- expected_output = None
- if args.device:
- run_cmd = ['dalvikvm64'] if args.x64 else ['dalvikvm32']
- test_env = DeviceTestEnv(args.classpath)
- else:
- run_cmd = ['dalvikvm64'] if args.x64 else ['dalvikvm32']
- run_cmd += ['-XXlib:{0}'.format(args.lib)]
- if not args.image:
- image_path = '{0}/framework/core-optimizing-pic.art'.format(
- GetEnvVariableOrError('ANDROID_HOST_OUT'))
- else:
- image_path = args.image
- run_cmd += ['-Ximage:{0}'.format(image_path)]
- if args.dalvikvm_opts:
- run_cmd += args.dalvikvm_opts
- test_env = HostTestEnv(args.classpath, args.x64)
-
- # Perform the search
- try:
- testable = Dex2OatWrapperTestable(run_cmd, test_env, args.classname,
- args.test_args, expected_output,
- args.verbose)
- (method, opt_pass) = BugSearch(testable)
- except Exception as e:
- print('Error. Refer to logfile: {0}'.format(test_env.logfile.name))
- test_env.logfile.write('Exception: {0}\n'.format(e))
- raise
-
- # Report results
- if method is None:
- print('Couldn\'t find any bugs.')
- elif opt_pass is None:
- print('Faulty method: {0}. Fails with just mandatory passes.'.format(
- method))
- else:
- print('Faulty method and pass: {0}, {1}.'.format(method, opt_pass))
- print('Logfile: {0}'.format(test_env.logfile.name))
- sys.exit(0)
-
-
-if __name__ == '__main__':
- main()
diff --git a/tools/bisection_search/README.md b/tools/bisection_search/README.md
new file mode 100644
index 0000000..a7485c2
--- /dev/null
+++ b/tools/bisection_search/README.md
@@ -0,0 +1,43 @@
+Bisection Bug Search
+====================
+
+Bisection Bug Search is a tool for finding compiler optimization bugs. It
+accepts a program which exposes a bug by producing incorrect output, along with
+the expected output for the program. It then attempts to narrow the issue down
+to a single method and optimization pass, under the assumption that the
+interpreter is correct.
+
+Given methods in order M0..Mn, it finds the smallest i such that compiling Mi
+and interpreting all other methods produces incorrect output. Then, given the
+ordered optimization passes P0..Pl, it finds the smallest j such that compiling
+Mi with passes P0..Pj-1 produces the expected output and compiling Mi with
+passes P0..Pj produces incorrect output. Finally, it prints Mi and Pj.
+
+How to run Bisection Bug Search
+===============================
+
+ bisection_search.py [-h] [-cp CLASSPATH] [--class CLASSNAME] [--lib LIB]
+ [--dalvikvm-option [OPT [OPT ...]]] [--arg [ARG [ARG ...]]]
+ [--image IMAGE] [--raw-cmd RAW_CMD]
+ [--64] [--device] [--expected-output EXPECTED_OUTPUT]
+ [--check-script CHECK_SCRIPT] [--verbose]
+
+ Tool for finding compiler bugs. Either --raw-cmd or both -cp and --class are required.
+
+ optional arguments:
+ -h, --help show this help message and exit
+
+ dalvikvm command options:
+ -cp CLASSPATH, --classpath CLASSPATH classpath
+ --class CLASSNAME name of main class
+ --lib LIB lib to use, default: libart.so
+ --dalvikvm-option [OPT [OPT ...]] additional dalvikvm option
+ --arg [ARG [ARG ...]] argument passed to test
+ --image IMAGE path to image
+ --raw-cmd RAW_CMD bisect with this command, ignore other command options
+
+ bisection options:
+ --64 x64 mode
+ --device run on device
+ --expected-output EXPECTED_OUTPUT file containing expected output
+ --check-script CHECK_SCRIPT script comparing output and expected output
+ --verbose enable verbose output
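+
+Example
+=======
+
+A hypothetical host invocation (the file names here are placeholders), assuming
+the correct output of class Main from classes.dex has been saved to out.txt:
+
+    ./bisection_search.py -cp classes.dex --class Main --expected-output out.txt
+
+The tool then prints the faulty method and, when it can narrow the issue down
+further, the optimization pass whose inclusion first makes the output diverge.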
diff --git a/tools/bisection_search/__init__.py b/tools/bisection_search/__init__.py
new file mode 100644
index 0000000..0a42789
--- /dev/null
+++ b/tools/bisection_search/__init__.py
@@ -0,0 +1,17 @@
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This file is intentionally left empty. It indicates that the directory is a Python package.
diff --git a/tools/bisection_search/bisection_search.py b/tools/bisection_search/bisection_search.py
new file mode 100755
index 0000000..110ef82
--- /dev/null
+++ b/tools/bisection_search/bisection_search.py
@@ -0,0 +1,396 @@
+#!/usr/bin/env python3.4
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Performs bisection bug search on methods and optimizations.
+
+See README.md.
+
+Example usage:
+./bisection_search.py -cp classes.dex --class Test --expected-output output
+"""
+
+import abc
+import argparse
+import re
+import shlex
+from subprocess import call
+import sys
+from tempfile import NamedTemporaryFile
+
+from common import DeviceTestEnv
+from common import FatalError
+from common import GetEnvVariableOrError
+from common import HostTestEnv
+
+
+# Passes that are never disabled during the search process because disabling them
+# would compromise correctness.
+MANDATORY_PASSES = ['dex_cache_array_fixups_arm',
+ 'dex_cache_array_fixups_mips',
+ 'instruction_simplifier$before_codegen',
+ 'pc_relative_fixups_x86',
+ 'pc_relative_fixups_mips',
+ 'x86_memory_operand_generation']
+
+# Passes that show up as optimizations in the compiler's verbose output but
+# aren't driven by the run-passes mechanism. They are mandatory and always run;
+# we never pass them to --run-passes.
+NON_PASSES = ['builder', 'prepare_for_register_allocation',
+ 'liveness', 'register']
+
+
+class Dex2OatWrapperTestable(object):
+ """Class representing a testable compilation.
+
+ Accepts filters on compiled methods and optimization passes.
+ """
+
+ def __init__(self, base_cmd, test_env, output_checker=None, verbose=False):
+ """Constructor.
+
+ Args:
+ base_cmd: list of strings, base command to run.
+ test_env: ITestEnv.
+ output_checker: IOutputCheck, output checker.
+ verbose: bool, enable verbose output.
+ """
+ self._base_cmd = base_cmd
+ self._test_env = test_env
+ self._output_checker = output_checker
+ self._compiled_methods_path = self._test_env.CreateFile('compiled_methods')
+ self._passes_to_run_path = self._test_env.CreateFile('run_passes')
+ self._verbose = verbose
+
+ def Test(self, compiled_methods, passes_to_run=None):
+ """Tests compilation with compiled_methods and run_passes switches active.
+
+ If compiled_methods is None then compiles all methods.
+ If passes_to_run is None then runs default passes.
+
+ Args:
+ compiled_methods: list of strings representing methods to compile or None.
+ passes_to_run: list of strings representing passes to run or None.
+
+ Returns:
+ True if test passes with given settings. False otherwise.
+ """
+ if self._verbose:
+ print('Testing methods: {0} passes: {1}.'.format(
+ compiled_methods, passes_to_run))
+ cmd = self._PrepareCmd(compiled_methods=compiled_methods,
+ passes_to_run=passes_to_run,
+ verbose_compiler=False)
+ (output, ret_code) = self._test_env.RunCommand(
+ cmd, {'ANDROID_LOG_TAGS': '*:e'})
+    if self._output_checker is not None:
+      res = self._output_checker.Check(output)
+    else:
+      res = ret_code == 0
+ if self._verbose:
+ print('Test passed: {0}.'.format(res))
+ return res
+
+ def GetAllMethods(self):
+ """Get methods compiled during the test.
+
+ Returns:
+ List of strings representing methods compiled during the test.
+
+ Raises:
+ FatalError: An error occurred when retrieving methods list.
+ """
+ cmd = self._PrepareCmd(verbose_compiler=True)
+ (output, _) = self._test_env.RunCommand(cmd, {'ANDROID_LOG_TAGS': '*:i'})
+ match_methods = re.findall(r'Building ([^\n]+)\n', output)
+ if not match_methods:
+ raise FatalError('Failed to retrieve methods list. '
+                       'Unrecognized output format.')
+ return match_methods
+
+ def GetAllPassesForMethod(self, compiled_method):
+    """Get all optimization passes run for a method during the test.
+
+ Args:
+ compiled_method: string representing method to compile.
+
+ Returns:
+      List of strings representing passes run for compiled_method during the test.
+
+ Raises:
+ FatalError: An error occurred when retrieving passes list.
+ """
+ cmd = self._PrepareCmd(compiled_methods=[compiled_method],
+ verbose_compiler=True)
+ (output, _) = self._test_env.RunCommand(cmd, {'ANDROID_LOG_TAGS': '*:i'})
+ match_passes = re.findall(r'Starting pass: ([^\n]+)\n', output)
+ if not match_passes:
+ raise FatalError('Failed to retrieve passes list. '
+                       'Unrecognized output format.')
+ return [p for p in match_passes if p not in NON_PASSES]
+
+ def _PrepareCmd(self, compiled_methods=None, passes_to_run=None,
+ verbose_compiler=False):
+ """Prepare command to run."""
+ cmd = [self._base_cmd[0]]
+    # Insert the additional compiler options right after the binary, before the
+    # rest of the base command.
+ if compiled_methods is not None:
+ self._test_env.WriteLines(self._compiled_methods_path, compiled_methods)
+ cmd += ['-Xcompiler-option', '--compiled-methods={0}'.format(
+ self._compiled_methods_path)]
+ if passes_to_run is not None:
+ self._test_env.WriteLines(self._passes_to_run_path, passes_to_run)
+ cmd += ['-Xcompiler-option', '--run-passes={0}'.format(
+ self._passes_to_run_path)]
+ if verbose_compiler:
+ cmd += ['-Xcompiler-option', '--runtime-arg', '-Xcompiler-option',
+ '-verbose:compiler', '-Xcompiler-option', '-j1']
+ cmd += self._base_cmd[1:]
+ return cmd
+
+
+class IOutputCheck(abc.ABC):
+ """Abstract output checking class.
+
+ Checks if output is correct.
+ """
+
+ @abc.abstractmethod
+ def Check(self, output):
+ """Check if output is correct.
+
+ Args:
+ output: string, output to check.
+
+ Returns:
+ boolean, True if output is correct, False otherwise.
+ """
+
+
+class EqualsOutputCheck(IOutputCheck):
+ """Concrete output checking class checking for equality to expected output."""
+
+ def __init__(self, expected_output):
+ """Constructor.
+
+ Args:
+ expected_output: string, expected output.
+ """
+ self._expected_output = expected_output
+
+ def Check(self, output):
+ """See base class."""
+ return self._expected_output == output
+
+
+class ExternalScriptOutputCheck(IOutputCheck):
+ """Concrete output checking class calling an external script.
+
+  The script should accept two arguments: the path to the expected output and
+  the path to the program output. It should exit with a zero return code if the
+  outputs are equivalent and with a non-zero return code otherwise.
+ """
+
+ def __init__(self, script_path, expected_output_path, logfile):
+ """Constructor.
+
+ Args:
+ script_path: string, path to checking script.
+ expected_output_path: string, path to file with expected output.
+ logfile: file handle, logfile.
+ """
+ self._script_path = script_path
+ self._expected_output_path = expected_output_path
+ self._logfile = logfile
+
+ def Check(self, output):
+ """See base class."""
+ ret_code = None
+ with NamedTemporaryFile(mode='w', delete=False) as temp_file:
+ temp_file.write(output)
+ temp_file.flush()
+ ret_code = call(
+ [self._script_path, self._expected_output_path, temp_file.name],
+ stdout=self._logfile, stderr=self._logfile, universal_newlines=True)
+ return ret_code == 0
+
+
+def BinarySearch(start, end, test):
+ """Binary search integers using test function to guide the process."""
+ while start < end:
+ mid = (start + end) // 2
+ if test(mid):
+ start = mid + 1
+ else:
+ end = mid
+ return start
+
+
+def FilterPasses(passes, cutoff_idx):
+ """Filters passes list according to cutoff_idx but keeps mandatory passes."""
+ return [opt_pass for idx, opt_pass in enumerate(passes)
+ if opt_pass in MANDATORY_PASSES or idx < cutoff_idx]
+
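+# Illustrative sketches of the two helpers above (not used by the tool itself):
+#
+#   BinarySearch(0, 6, lambda i: i < 3)
+#   # -> 3, the smallest value for which the test function returns False.
+#
+#   FilterPasses(['a', 'b', 'pc_relative_fixups_x86', 'c'], 1)
+#   # -> ['a', 'pc_relative_fixups_x86'], keeping mandatory passes past the cutoff.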
+
+def BugSearch(testable):
+ """Find buggy (method, optimization pass) pair for a given testable.
+
+ Args:
+ testable: Dex2OatWrapperTestable.
+
+ Returns:
+ (string, string) tuple. First element is name of method which when compiled
+ exposes test failure. Second element is name of optimization pass such that
+ for aforementioned method running all passes up to and excluding the pass
+ results in test passing but running all passes up to and including the pass
+ results in test failing.
+
+ (None, None) if test passes when compiling all methods.
+ (string, None) if a method is found which exposes the failure, but the
+ failure happens even when running just mandatory passes.
+
+ Raises:
+ FatalError: Testable fails with no methods compiled.
+ AssertionError: Method failed for all passes when bisecting methods, but
+ passed when bisecting passes. Possible sporadic failure.
+ """
+ all_methods = testable.GetAllMethods()
+ faulty_method_idx = BinarySearch(
+ 0,
+ len(all_methods) + 1,
+ lambda mid: testable.Test(all_methods[0:mid]))
+ if faulty_method_idx == len(all_methods) + 1:
+ return (None, None)
+ if faulty_method_idx == 0:
+ raise FatalError('Testable fails with no methods compiled. '
+ 'Perhaps issue lies outside of compiler.')
+ faulty_method = all_methods[faulty_method_idx - 1]
+ all_passes = testable.GetAllPassesForMethod(faulty_method)
+ faulty_pass_idx = BinarySearch(
+ 0,
+ len(all_passes) + 1,
+ lambda mid: testable.Test([faulty_method],
+ FilterPasses(all_passes, mid)))
+ if faulty_pass_idx == 0:
+ return (faulty_method, None)
+ assert faulty_pass_idx != len(all_passes) + 1, ('Method must fail for some '
+ 'passes.')
+ faulty_pass = all_passes[faulty_pass_idx - 1]
+ return (faulty_method, faulty_pass)
+
+
+def PrepareParser():
+ """Prepares argument parser."""
+ parser = argparse.ArgumentParser(
+ description='Tool for finding compiler bugs. Either --raw-cmd or both '
+ '-cp and --class are required.')
+ command_opts = parser.add_argument_group('dalvikvm command options')
+ command_opts.add_argument('-cp', '--classpath', type=str, help='classpath')
+ command_opts.add_argument('--class', dest='classname', type=str,
+ help='name of main class')
+ command_opts.add_argument('--lib', dest='lib', type=str, default='libart.so',
+ help='lib to use, default: libart.so')
+ command_opts.add_argument('--dalvikvm-option', dest='dalvikvm_opts',
+ metavar='OPT', nargs='*', default=[],
+ help='additional dalvikvm option')
+ command_opts.add_argument('--arg', dest='test_args', nargs='*', default=[],
+ metavar='ARG', help='argument passed to test')
+ command_opts.add_argument('--image', type=str, help='path to image')
+ command_opts.add_argument('--raw-cmd', dest='raw_cmd', type=str,
+ help='bisect with this command, ignore other '
+ 'command options')
+ bisection_opts = parser.add_argument_group('bisection options')
+ bisection_opts.add_argument('--64', dest='x64', action='store_true',
+ default=False, help='x64 mode')
+ bisection_opts.add_argument(
+ '--device', action='store_true', default=False, help='run on device')
+ bisection_opts.add_argument('--expected-output', type=str,
+ help='file containing expected output')
+ bisection_opts.add_argument(
+ '--check-script', dest='check_script', type=str,
+ help='script comparing output and expected output')
+ bisection_opts.add_argument('--verbose', action='store_true',
+ default=False, help='enable verbose output')
+ return parser
+
+
+def PrepareBaseCommand(args, classpath):
+ """Prepares base command used to run test."""
+ if args.raw_cmd:
+ return shlex.split(args.raw_cmd)
+ else:
+ base_cmd = ['dalvikvm64'] if args.x64 else ['dalvikvm32']
+ if not args.device:
+ base_cmd += ['-XXlib:{0}'.format(args.lib)]
+ if not args.image:
+ image_path = '{0}/framework/core-optimizing-pic.art'.format(
+ GetEnvVariableOrError('ANDROID_HOST_OUT'))
+ else:
+ image_path = args.image
+ base_cmd += ['-Ximage:{0}'.format(image_path)]
+ if args.dalvikvm_opts:
+ base_cmd += args.dalvikvm_opts
+ base_cmd += ['-cp', classpath, args.classname] + args.test_args
+ return base_cmd
+
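+# For reference, on host with "--64 -cp classes.dex --class Main" and no --image,
+# PrepareBaseCommand yields roughly (sketch, paths abbreviated):
+#   ['dalvikvm64', '-XXlib:libart.so',
+#    '-Ximage:<ANDROID_HOST_OUT>/framework/core-optimizing-pic.art',
+#    '-cp', 'classes.dex', 'Main']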
+
+def main():
+ # Parse arguments
+ parser = PrepareParser()
+ args = parser.parse_args()
+ if not args.raw_cmd and (not args.classpath or not args.classname):
+ parser.error('Either --raw-cmd or both -cp and --class are required')
+
+ # Prepare environment
+ classpath = args.classpath
+ if args.device:
+ test_env = DeviceTestEnv()
+ if classpath:
+ classpath = test_env.PushClasspath(classpath)
+ else:
+ test_env = HostTestEnv(args.x64)
+ base_cmd = PrepareBaseCommand(args, classpath)
+ output_checker = None
+ if args.expected_output:
+ if args.check_script:
+ output_checker = ExternalScriptOutputCheck(
+ args.check_script, args.expected_output, test_env.logfile)
+ else:
+ with open(args.expected_output, 'r') as expected_output_file:
+ output_checker = EqualsOutputCheck(expected_output_file.read())
+
+ # Perform the search
+ try:
+ testable = Dex2OatWrapperTestable(base_cmd, test_env, output_checker,
+ args.verbose)
+ (method, opt_pass) = BugSearch(testable)
+ except Exception as e:
+ print('Error. Refer to logfile: {0}'.format(test_env.logfile.name))
+ test_env.logfile.write('Exception: {0}\n'.format(e))
+ raise
+
+ # Report results
+ if method is None:
+ print('Couldn\'t find any bugs.')
+ elif opt_pass is None:
+ print('Faulty method: {0}. Fails with just mandatory passes.'.format(
+ method))
+ else:
+ print('Faulty method and pass: {0}, {1}.'.format(method, opt_pass))
+ print('Logfile: {0}'.format(test_env.logfile.name))
+ sys.exit(0)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/bisection-search/bisection_test.py b/tools/bisection_search/bisection_test.py
similarity index 100%
rename from tools/bisection-search/bisection_test.py
rename to tools/bisection_search/bisection_test.py
diff --git a/tools/bisection-search/common.py b/tools/bisection_search/common.py
similarity index 80%
rename from tools/bisection-search/common.py
rename to tools/bisection_search/common.py
index 8361fc9..d5029bb 100755
--- a/tools/bisection-search/common.py
+++ b/tools/bisection_search/common.py
@@ -23,6 +23,7 @@
from subprocess import check_call
from subprocess import PIPE
from subprocess import Popen
+from subprocess import STDOUT
from subprocess import TimeoutExpired
from tempfile import mkdtemp
@@ -81,19 +82,20 @@
Returns:
tuple (string, string, int) stdout output, stderr output, return code.
"""
- proc = Popen(cmd, stderr=PIPE, stdout=PIPE, env=env, universal_newlines=True)
+ proc = Popen(cmd, stderr=STDOUT, stdout=PIPE, env=env,
+ universal_newlines=True)
timeouted = False
try:
- (output, err_output) = proc.communicate(timeout=timeout)
+ (output, _) = proc.communicate(timeout=timeout)
except TimeoutExpired:
timeouted = True
proc.kill()
- (output, err_output) = proc.communicate()
- logfile.write('Command:\n{0}\n{1}{2}\nReturn code: {3}\n'.format(
- _CommandListToCommandString(cmd), err_output, output,
+ (output, _) = proc.communicate()
+ logfile.write('Command:\n{0}\n{1}\nReturn code: {2}\n'.format(
+ _CommandListToCommandString(cmd), output,
'TIMEOUT' if timeouted else proc.returncode))
ret_code = 1 if timeouted else proc.returncode
- return (output, err_output, ret_code)
+ return (output, ret_code)
def _CommandListToCommandString(cmd):
@@ -148,21 +150,18 @@
"""
@abc.abstractmethod
- def RunCommand(self, cmd):
- """Runs command in environment.
+ def RunCommand(self, cmd, env_updates=None):
+ """Runs command in environment with updated environmental variables.
Args:
- cmd: string, command to run.
-
+ cmd: list of strings, command to run.
+ env_updates: dict, string to string, maps names of variables to their
+ updated values.
Returns:
tuple (string, string, int) stdout output, stderr output, return code.
"""
@abc.abstractproperty
- def classpath(self):
- """Gets environment specific classpath with test class."""
-
- @abc.abstractproperty
def logfile(self):
"""Gets file handle to logfile residing on host."""
@@ -176,14 +175,12 @@
For methods documentation see base class.
"""
- def __init__(self, classpath, x64):
+ def __init__(self, x64):
"""Constructor.
Args:
- classpath: string, classpath with test class.
x64: boolean, whether to setup in x64 mode.
"""
- self._classpath = classpath
self._env_path = mkdtemp(dir='/tmp/', prefix='bisection_search_')
self._logfile = open('{0}/log'.format(self._env_path), 'w+')
os.mkdir('{0}/dalvik-cache'.format(self._env_path))
@@ -197,6 +194,7 @@
self._shell_env['ANDROID_DATA'] = self._env_path
self._shell_env['ANDROID_ROOT'] = android_root
self._shell_env['LD_LIBRARY_PATH'] = library_path
+ self._shell_env['DYLD_LIBRARY_PATH'] = library_path
self._shell_env['PATH'] = (path + ':' + self._shell_env['PATH'])
# Using dlopen requires load bias on the host.
self._shell_env['LD_USE_LOAD_BIAS'] = '1'
@@ -213,13 +211,13 @@
f.writelines('{0}\n'.format(line) for line in lines)
return
- def RunCommand(self, cmd):
+ def RunCommand(self, cmd, env_updates=None):
+ if not env_updates:
+ env_updates = {}
self._EmptyDexCache()
- return _RunCommandForOutputAndLog(cmd, self._shell_env, self._logfile)
-
- @property
- def classpath(self):
- return self._classpath
+ env = self._shell_env.copy()
+ env.update(env_updates)
+ return _RunCommandForOutputAndLog(cmd, env, self._logfile)
@property
def logfile(self):
@@ -247,32 +245,18 @@
For methods documentation see base class.
"""
- def __init__(self, classpath):
- """Constructor.
-
- Args:
- classpath: string, classpath with test class.
- """
+ def __init__(self):
+ """Constructor."""
self._host_env_path = mkdtemp(dir='/tmp/', prefix='bisection_search_')
self._logfile = open('{0}/log'.format(self._host_env_path), 'w+')
self._device_env_path = '{0}/{1}'.format(
DEVICE_TMP_PATH, os.path.basename(self._host_env_path))
- self._classpath = os.path.join(
- self._device_env_path, os.path.basename(classpath))
- self._shell_env = os.environ
+ self._shell_env = os.environ.copy()
self._AdbMkdir('{0}/dalvik-cache'.format(self._device_env_path))
for arch_cache_path in _DexArchCachePaths(self._device_env_path):
self._AdbMkdir(arch_cache_path)
- paths = classpath.split(':')
- device_paths = []
- for path in paths:
- device_paths.append('{0}/{1}'.format(
- self._device_env_path, os.path.basename(path)))
- self._AdbPush(path, self._device_env_path)
- self._classpath = ':'.join(device_paths)
-
def CreateFile(self, name=None):
with NamedTemporaryFile(mode='w') as temp_file:
self._AdbPush(temp_file.name, self._device_env_path)
@@ -283,25 +267,47 @@
def WriteLines(self, file_path, lines):
with NamedTemporaryFile(mode='w') as temp_file:
temp_file.writelines('{0}\n'.format(line) for line in lines)
+ temp_file.flush()
self._AdbPush(temp_file.name, file_path)
return
- def RunCommand(self, cmd):
+ def RunCommand(self, cmd, env_updates=None):
+ if not env_updates:
+ env_updates = {}
self._EmptyDexCache()
+ if 'ANDROID_DATA' not in env_updates:
+ env_updates['ANDROID_DATA'] = self._device_env_path
+ env_updates_cmd = ' '.join(['{0}={1}'.format(var, val) for var, val
+ in env_updates.items()])
cmd = _CommandListToCommandString(cmd)
- cmd = ('adb shell "logcat -c && ANDROID_DATA={0} {1} && '
- 'logcat -d dex2oat:* *:S 1>&2"').format(self._device_env_path, cmd)
- return _RunCommandForOutputAndLog(shlex.split(cmd), self._shell_env,
- self._logfile)
-
- @property
- def classpath(self):
- return self._classpath
+ cmd = ('adb shell "logcat -c && {0} {1} ; logcat -d -s dex2oat:* dex2oatd:*'
+ '| grep -v "^---------" 1>&2"').format(env_updates_cmd, cmd)
+ return _RunCommandForOutputAndLog(
+ shlex.split(cmd), self._shell_env, self._logfile)
@property
def logfile(self):
return self._logfile
+ def PushClasspath(self, classpath):
+ """Push classpath to on-device test directory.
+
+    The classpath can contain multiple colon-separated file paths; each file is
+    pushed. Returns an analogous classpath with paths valid on the device.
+
+ Args:
+ classpath: string, classpath in format 'a/b/c:d/e/f'.
+ Returns:
+ string, classpath valid on device.
+ """
+ paths = classpath.split(':')
+ device_paths = []
+ for path in paths:
+ device_paths.append('{0}/{1}'.format(
+ self._device_env_path, os.path.basename(path)))
+ self._AdbPush(path, self._device_env_path)
+ return ':'.join(device_paths)
+
def _AdbPush(self, what, where):
check_call(shlex.split('adb push "{0}" "{1}"'.format(what, where)),
stdout=self._logfile, stderr=self._logfile)
diff --git a/tools/cpp-define-generator/Android.bp b/tools/cpp-define-generator/Android.bp
new file mode 100644
index 0000000..d792e90
--- /dev/null
+++ b/tools/cpp-define-generator/Android.bp
@@ -0,0 +1,36 @@
+//
+// Copyright (C) 2014 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Build a "data" binary which will hold all the symbol values that will be parsed by the other scripts.
+//
+// Builds are for host only; target-specific define generation is possible but trickier and would need extra tooling.
+//
+// In the future we may wish to parameterize this on (32,64)x(read_barrier,no_read_barrier).
+
+art_cc_binary {
+ name: "cpp-define-generator-data",
+ host_supported: true,
+ device_supported: false,
+ defaults: [
+ "art_debug_defaults",
+ "art_defaults",
+ ],
+ include_dirs: ["art/runtime"],
+ srcs: ["main.cc"],
+ shared_libs: [
+ "libbase",
+ ],
+}
diff --git a/tools/cpp-define-generator/Android.mk b/tools/cpp-define-generator/Android.mk
deleted file mode 100644
index 6ba643c..0000000
--- a/tools/cpp-define-generator/Android.mk
+++ /dev/null
@@ -1,34 +0,0 @@
-#
-# Copyright (C) 2014 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-LOCAL_PATH := $(call my-dir)
-
-include art/build/Android.executable.mk
-
-CPP_DEFINE_GENERATOR_SRC_FILES := \
- main.cc
-
-CPP_DEFINE_GENERATOR_EXTRA_SHARED_LIBRARIES :=
-CPP_DEFINE_GENERATOR_EXTRA_INCLUDE :=
-CPP_DEFINE_GENERATOR_MULTILIB :=
-
-# Build a "data" binary which will hold all the symbol values that will be parsed by the other scripts.
-#
-# Builds are for host only, target-specific define generation is possibly but is trickier and would need extra tooling.
-#
-# In the future we may wish to parameterize this on (32,64)x(read_barrier,no_read_barrier).
-$(eval $(call build-art-executable,cpp-define-generator-data,$(CPP_DEFINE_GENERATOR_SRC_FILES),$(CPP_DEFINE_GENERATOR_EXTRA_SHARED_LIBRARIES),$(CPP_DEFINE_GENERATOR_EXTRA_INCLUDE),host,debug,$(CPP_DEFINE_GENERATOR_MULTILIB),shared))
-
diff --git a/tools/cpp-define-generator/constant_dexcache.def b/tools/cpp-define-generator/constant_dexcache.def
index d10ca1e..ede16d2 100644
--- a/tools/cpp-define-generator/constant_dexcache.def
+++ b/tools/cpp-define-generator/constant_dexcache.def
@@ -24,5 +24,5 @@
art::mirror::DexCache::kDexCacheStringCacheSize - 1)
DEFINE_EXPR(STRING_DEX_CACHE_HASH_BITS, int32_t,
art::LeastSignificantBit(art::mirror::DexCache::kDexCacheStringCacheSize))
-DEFINE_EXPR(STRING_DEX_CACHE_ELEMENT_SIZE, size_t,
+DEFINE_EXPR(STRING_DEX_CACHE_ELEMENT_SIZE, int32_t,
sizeof(art::mirror::StringDexCachePair))
\ No newline at end of file
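The hunk above narrows the declared type of STRING_DEX_CACHE_ELEMENT_SIZE from size_t to int32_t. One plausible reading, not stated in the commit, is that size_t's width depends on the host the generator is built for, while the emitted constant is consumed by both 32-bit and 64-bit code; the standalone snippet below (not ART code) shows the width difference on a typical 64-bit host.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // On a typical 64-bit host size_t is 8 bytes while int32_t is always 4,
      // so a constant declared as size_t would change width with the host ABI.
      std::printf("sizeof(size_t)  = %zu\n", sizeof(size_t));
      std::printf("sizeof(int32_t) = %zu\n", sizeof(int32_t));
      return 0;
    }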
diff --git a/tools/dmtracedump/Android.bp b/tools/dmtracedump/Android.bp
new file mode 100644
index 0000000..4f942bd
--- /dev/null
+++ b/tools/dmtracedump/Android.bp
@@ -0,0 +1,44 @@
+// Copyright (C) 2015 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Java method trace dump tool
+
+art_cc_binary {
+ name: "dmtracedump",
+ host_supported: true,
+ device_supported: false,
+ srcs: ["tracedump.cc"],
+ cflags: [
+ "-O0",
+ "-g",
+ "-Wall",
+ ],
+ target: {
+ windows: {
+ enabled: true,
+ },
+ },
+}
+
+art_cc_binary {
+ name: "create_test_dmtrace",
+ host_supported: true,
+ device_supported: false,
+ srcs: ["createtesttrace.cc"],
+ cflags: [
+ "-O0",
+ "-g",
+ "-Wall",
+ ],
+}
diff --git a/tools/dmtracedump/Android.mk b/tools/dmtracedump/Android.mk
deleted file mode 100644
index da0d632..0000000
--- a/tools/dmtracedump/Android.mk
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Java method trace dump tool
-
-LOCAL_PATH:= $(call my-dir)
-
-include $(CLEAR_VARS)
-LOCAL_CPP_EXTENSION := cc
-LOCAL_SRC_FILES := tracedump.cc
-LOCAL_CFLAGS += -O0 -g -Wall
-LOCAL_MODULE_HOST_OS := darwin linux windows
-LOCAL_MODULE := dmtracedump
-include $(BUILD_HOST_EXECUTABLE)
-
-include $(CLEAR_VARS)
-LOCAL_CPP_EXTENSION := cc
-LOCAL_SRC_FILES := createtesttrace.cc
-LOCAL_CFLAGS += -O0 -g -Wall
-LOCAL_MODULE := create_test_dmtrace
-include $(BUILD_HOST_EXECUTABLE)
diff --git a/tools/javafuzz/__init__.py b/tools/javafuzz/__init__.py
new file mode 100644
index 0000000..3955c71
--- /dev/null
+++ b/tools/javafuzz/__init__.py
@@ -0,0 +1,17 @@
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This file intentionally contains no code; its presence marks this directory as a Python package.
\ No newline at end of file
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index a69b58b..7b5e9ed 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -86,7 +86,6 @@
"libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithJarFtpURLConnection",
"libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithLoggingSocketHandler",
"libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithHttpURLConnection",
- "libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithJarHttpURLConnection",
"org.apache.harmony.luni.tests.internal.net.www.protocol.http.HttpURLConnectionTest",
"org.apache.harmony.luni.tests.internal.net.www.protocol.https.HttpsURLConnectionTest",
"org.apache.harmony.luni.tests.java.net.URLConnectionTest",
@@ -234,5 +233,11 @@
modes: [device],
names: ["libcore.java.lang.ProcessBuilderTest#testRedirectInherit",
"libcore.java.lang.ProcessBuilderTest#testRedirect_nullStreams"]
+},
+{
+ description: "Sometimes times out",
+ result: EXEC_FAILED,
+ bug: 31258002,
+ names: ["libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithJarHttpURLConnection"]
}
]
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index bdb2d4b..01dae43 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -50,6 +50,7 @@
host="no"
# Use JIT compiling by default.
use_jit=true
+variant_cmdline_parameter="--variant=X32"
while true; do
if [[ "$1" == "--mode=host" ]]; then
@@ -93,11 +94,34 @@
shift
elif [[ "$1" == "" ]]; then
break
+ elif [[ $1 == --variant=* ]]; then
+ variant_cmdline_parameter=$1
+ shift
else
shift
fi
done
+# For the host:
+#
+# If a variant is set, use it to modify the art_debugee parameter so that the forked VM has the
+# same bitness as the controller. This should be fine and not impact cross-bitness testing, as
+# the protocol is always 64-bit anyway (in our implementation).
+#
+# Note: this isn't necessary for the device, as the BOOTCLASSPATH environment variable is set
+# there and used as a fallback.
+if [[ $host == "yes" ]]; then
+ variant=${variant_cmdline_parameter:10}
+ if [[ $variant == "x32" || $variant == "X32" ]]; then
+ art_debugee="$art_debugee --32"
+ elif [[ $variant == "x64" || $variant == "X64" ]]; then
+ art_debugee="$art_debugee --64"
+ else
+ echo "Error, do not understand variant $variant_cmdline_parameter."
+ exit 1
+ fi
+fi
+
if [[ "$image" != "" ]]; then
vm_args="--vm-arg $image"
fi
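With the new flag wired in, a host run can pin the debuggee bitness. For example (paths assume a standard AOSP checkout with the build environment already set up):

    # Run the JDWP tests on the host, forcing a 64-bit debuggee.
    ./art/tools/run-jdwp-tests.sh --mode=host --variant=X64

    # 32-bit works the same way; any other variant now fails with an error.
    ./art/tools/run-jdwp-tests.sh --mode=host --variant=X32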