-rw-r--r--  Android.mk | 4
-rw-r--r--  build/Android.common_build.mk | 21
-rw-r--r--  build/Android.oat.mk | 6
-rw-r--r--  compiler/buffered_output_stream.h | 2
-rw-r--r--  compiler/common_compiler_test.cc | 10
-rw-r--r--  compiler/dex/bb_optimizations.h | 41
-rw-r--r--  compiler/dex/local_value_numbering.cc | 11
-rw-r--r--  compiler/dex/mir_graph.cc | 13
-rw-r--r--  compiler/dex/mir_graph.h | 15
-rw-r--r--  compiler/dex/mir_optimization.cc | 262
-rw-r--r--  compiler/dex/pass_driver_me_opts.cc | 3
-rw-r--r--  compiler/dex/quick/arm/assemble_arm.cc | 2
-rw-r--r--  compiler/dex/quick/arm/int_arm.cc | 14
-rw-r--r--  compiler/dex/quick/arm/utility_arm.cc | 2
-rw-r--r--  compiler/dex/quick/arm64/assemble_arm64.cc | 10
-rw-r--r--  compiler/dex/quick/arm64/utility_arm64.cc | 2
-rw-r--r--  compiler/dex/quick/codegen_util.cc | 8
-rw-r--r--  compiler/dex/quick/gen_common.cc | 2
-rwxr-xr-x  compiler/dex/quick/gen_invoke.cc | 9
-rw-r--r--  compiler/dex/quick/mips/assemble_mips.cc | 2
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc | 5
-rw-r--r--  compiler/dex/quick/mir_to_lir.h | 13
-rwxr-xr-x  compiler/dex/quick/x86/int_x86.cc | 42
-rw-r--r--  compiler/dex/quick/x86/utility_x86.cc | 8
-rw-r--r--  compiler/dex/ssa_transformation.cc | 18
-rw-r--r--  compiler/driver/compiler_driver.cc | 6
-rw-r--r--  compiler/elf_builder.h | 534
-rw-r--r--  compiler/elf_writer_mclinker.cc | 2
-rw-r--r--  compiler/elf_writer_quick.cc | 12
-rw-r--r--  compiler/file_output_stream.h | 2
-rw-r--r--  compiler/image_test.cc | 12
-rw-r--r--  compiler/image_writer.cc | 26
-rw-r--r--  compiler/image_writer.h | 16
-rw-r--r--  compiler/jni/quick/jni_compiler.cc | 4
-rw-r--r--  compiler/oat_test.cc | 2
-rw-r--r--  compiler/oat_writer.cc | 2
-rw-r--r--  compiler/optimizing/builder.h | 2
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 12
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 12
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 12
-rw-r--r--  compiler/optimizing/graph_visualizer.h | 2
-rw-r--r--  compiler/optimizing/locations.h | 25
-rw-r--r--  compiler/optimizing/nodes.h | 2
-rw-r--r--  compiler/optimizing/parallel_move_resolver.h | 2
-rw-r--r--  compiler/optimizing/stack_map_stream.h | 2
-rw-r--r--  compiler/output_stream_test.cc | 2
-rw-r--r--  compiler/utils/arena_object.h (renamed from compiler/utils/allocation.h) | 17
-rw-r--r--  compiler/utils/arm/assembler_arm.cc | 2
-rw-r--r--  compiler/utils/arm/assembler_arm.h | 2
-rw-r--r--  compiler/utils/arm/assembler_arm32.h | 2
-rw-r--r--  compiler/utils/arm/assembler_thumb2.h | 2
-rw-r--r--  compiler/utils/arm/constants_arm.h | 4
-rw-r--r--  compiler/utils/arm64/assembler_arm64.cc | 4
-rw-r--r--  compiler/utils/arm64/assembler_arm64.h | 11
-rw-r--r--  compiler/utils/arm64/constants_arm64.h | 3
-rw-r--r--  compiler/utils/assembler.cc | 6
-rw-r--r--  compiler/utils/assembler.h | 28
-rw-r--r--  compiler/utils/x86/assembler_x86.cc | 8
-rw-r--r--  compiler/utils/x86/assembler_x86.h | 6
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc | 8
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h | 2
-rw-r--r--  compiler/vector_output_stream.cc | 6
-rw-r--r--  compiler/vector_output_stream.h | 16
-rw-r--r--  disassembler/disassembler_arm.cc | 11
-rw-r--r--  disassembler/disassembler_x86.cc | 2
-rw-r--r--  oatdump/oatdump.cc | 31
-rw-r--r--  patchoat/patchoat.cc | 4
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S | 12
-rw-r--r--  runtime/arch/x86/context_x86.cc | 2
-rw-r--r--  runtime/arch/x86/fault_handler_x86.cc | 2
-rw-r--r--  runtime/arch/x86_64/context_x86_64.cc | 2
-rw-r--r--  runtime/base/bit_field.h | 24
-rw-r--r--  runtime/base/hex_dump.cc | 10
-rw-r--r--  runtime/base/hex_dump_test.cc | 2
-rw-r--r--  runtime/base/macros.h | 42
-rw-r--r--  runtime/base/value_object.h | 37
-rw-r--r--  runtime/check_jni.cc | 2
-rw-r--r--  runtime/class_linker.cc | 65
-rw-r--r--  runtime/class_linker.h | 4
-rw-r--r--  runtime/class_linker_test.cc | 138
-rw-r--r--  runtime/debugger.cc | 1
-rw-r--r--  runtime/dex_file-inl.h | 4
-rw-r--r--  runtime/dex_file.cc | 32
-rw-r--r--  runtime/dex_file.h | 60
-rw-r--r--  runtime/dex_file_test.cc | 14
-rw-r--r--  runtime/dex_file_verifier.cc | 48
-rw-r--r--  runtime/dex_file_verifier.h | 14
-rw-r--r--  runtime/dex_file_verifier_test.cc | 20
-rw-r--r--  runtime/dex_instruction-inl.h | 20
-rw-r--r--  runtime/dex_instruction.cc | 72
-rw-r--r--  runtime/dex_method_iterator.h | 2
-rw-r--r--  runtime/elf_file.cc | 102
-rw-r--r--  runtime/elf_file.h | 6
-rw-r--r--  runtime/elf_file_impl.h | 24
-rw-r--r--  runtime/entrypoints/portable/portable_trampoline_entrypoints.cc | 14
-rw-r--r--  runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc | 2
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 22
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc | 2
-rw-r--r--  runtime/entrypoints_order_test.cc | 260
-rw-r--r--  runtime/exception_test.cc | 2
-rw-r--r--  runtime/gc/accounting/atomic_stack.h | 2
-rw-r--r--  runtime/gc/accounting/card_table-inl.h | 60
-rw-r--r--  runtime/gc/accounting/card_table.cc | 20
-rw-r--r--  runtime/gc/accounting/card_table.h | 38
-rw-r--r--  runtime/gc/accounting/card_table_test.cc | 40
-rw-r--r--  runtime/gc/accounting/mod_union_table.cc | 18
-rw-r--r--  runtime/gc/accounting/mod_union_table.h | 6
-rw-r--r--  runtime/gc/accounting/remembered_set.cc | 12
-rw-r--r--  runtime/gc/accounting/remembered_set.h | 4
-rw-r--r--  runtime/gc/accounting/space_bitmap-inl.h | 36
-rw-r--r--  runtime/gc/accounting/space_bitmap.cc | 48
-rw-r--r--  runtime/gc/accounting/space_bitmap.h | 22
-rw-r--r--  runtime/gc/accounting/space_bitmap_test.cc | 18
-rw-r--r--  runtime/gc/allocator/rosalloc-inl.h | 2
-rw-r--r--  runtime/gc/allocator/rosalloc.cc | 93
-rw-r--r--  runtime/gc/allocator/rosalloc.h | 56
-rw-r--r--  runtime/gc/collector/mark_compact.cc | 2
-rw-r--r--  runtime/gc/collector/mark_compact.h | 2
-rw-r--r--  runtime/gc/collector/mark_sweep.cc | 16
-rw-r--r--  runtime/gc/collector/mark_sweep.h | 4
-rw-r--r--  runtime/gc/collector/semi_space.cc | 8
-rw-r--r--  runtime/gc/collector/semi_space.h | 2
-rw-r--r--  runtime/gc/heap.cc | 20
-rw-r--r--  runtime/gc/heap.h | 4
-rw-r--r--  runtime/gc/heap_test.cc | 2
-rw-r--r--  runtime/gc/space/bump_pointer_space-inl.h | 6
-rw-r--r--  runtime/gc/space/bump_pointer_space.cc | 20
-rw-r--r--  runtime/gc/space/bump_pointer_space.h | 10
-rw-r--r--  runtime/gc/space/dlmalloc_space.cc | 14
-rw-r--r--  runtime/gc/space/dlmalloc_space.h | 10
-rw-r--r--  runtime/gc/space/dlmalloc_space_base_test.cc | 2
-rw-r--r--  runtime/gc/space/dlmalloc_space_random_test.cc | 2
-rw-r--r--  runtime/gc/space/dlmalloc_space_static_test.cc | 2
-rw-r--r--  runtime/gc/space/image_space.cc | 4
-rw-r--r--  runtime/gc/space/large_object_space.cc | 16
-rw-r--r--  runtime/gc/space/large_object_space.h | 16
-rw-r--r--  runtime/gc/space/large_object_space_test.cc | 6
-rw-r--r--  runtime/gc/space/malloc_space.cc | 12
-rw-r--r--  runtime/gc/space/malloc_space.h | 8
-rw-r--r--  runtime/gc/space/rosalloc_space.cc | 12
-rw-r--r--  runtime/gc/space/rosalloc_space.h | 6
-rw-r--r--  runtime/gc/space/rosalloc_space_base_test.cc | 2
-rw-r--r--  runtime/gc/space/rosalloc_space_random_test.cc | 2
-rw-r--r--  runtime/gc/space/rosalloc_space_static_test.cc | 2
-rw-r--r--  runtime/gc/space/space.h | 26
-rw-r--r--  runtime/gc/space/space_test.h | 2
-rw-r--r--  runtime/gc/space/valgrind_malloc_space-inl.h | 16
-rw-r--r--  runtime/gc/space/valgrind_malloc_space.h | 2
-rw-r--r--  runtime/globals.h | 10
-rw-r--r--  runtime/handle_scope_test.cc | 8
-rw-r--r--  runtime/hprof/hprof.cc | 4
-rw-r--r--  runtime/image.cc | 4
-rw-r--r--  runtime/image.h | 28
-rw-r--r--  runtime/instruction_set_test.cc | 2
-rw-r--r--  runtime/jdwp/jdwp_event.cc | 16
-rw-r--r--  runtime/mem_map.cc | 36
-rw-r--r--  runtime/mem_map.h | 16
-rw-r--r--  runtime/mem_map_test.cc | 22
-rw-r--r--  runtime/memory_region.h | 10
-rw-r--r--  runtime/mirror/array-inl.h | 8
-rw-r--r--  runtime/mirror/art_method-inl.h | 2
-rw-r--r--  runtime/mirror/art_method.h | 4
-rw-r--r--  runtime/mirror/class.cc | 7
-rw-r--r--  runtime/mirror/class.h | 40
-rw-r--r--  runtime/mirror/object-inl.h | 46
-rw-r--r--  runtime/mirror/object.cc | 4
-rw-r--r--  runtime/mirror/object.h | 4
-rw-r--r--  runtime/mirror/object_test.cc | 3
-rw-r--r--  runtime/monitor_pool.h | 2
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc | 2
-rw-r--r--  runtime/native/java_lang_reflect_Field.cc | 1
-rw-r--r--  runtime/oat_file-inl.h | 8
-rw-r--r--  runtime/oat_file.cc | 34
-rw-r--r--  runtime/oat_file.h | 26
-rw-r--r--  runtime/stack.cc | 12
-rw-r--r--  runtime/stack.h | 8
-rw-r--r--  runtime/thread-inl.h | 4
-rw-r--r--  runtime/thread.cc | 14
-rw-r--r--  runtime/thread.h | 18
-rw-r--r--  runtime/thread_list.cc | 8
-rw-r--r--  runtime/utils.cc | 6
-rw-r--r--  runtime/utils.h | 16
-rw-r--r--  runtime/verifier/method_verifier.cc | 6
-rw-r--r--  test/Android.run-test.mk | 2
184 files changed, 1916 insertions(+), 1712 deletions(-)
diff --git a/Android.mk b/Android.mk
index 9a813fd9d9..282b17918a 100644
--- a/Android.mk
+++ b/Android.mk
@@ -30,9 +30,9 @@ ifneq (,$(filter clean-oat%,$(MAKECMDGOALS)))
art_dont_bother := true
endif
-# Don't bother with tests unless there is a test-art* or build-art* target.
+# Don't bother with tests unless there is a test-art*, build-art*, or related target.
art_test_bother := false
-ifneq (,$(filter %tests test-art% build-art%,$(MAKECMDGOALS)))
+ifneq (,$(filter %tests test-art% valgrind-test-art% build-art%,$(MAKECMDGOALS)))
art_test_bother := true
endif
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 55a482197f..d2d6d2381f 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -86,18 +86,8 @@ endif
#
# Used to enable optimizing compiler
#
-ART_USE_OPTIMIZING_COMPILER := false
-ifneq ($(wildcard art/USE_OPTIMIZING_COMPILER),)
-$(info Enabling ART_USE_OPTIMIZING_COMPILER because of existence of art/USE_OPTIMIZING_COMPILER)
-ART_USE_OPTIMIZING_COMPILER := true
-endif
-ifeq ($(WITH_ART_USE_OPTIMIZING_COMPILER), true)
-ART_USE_OPTIMIZING_COMPILER := true
-endif
-
ifeq ($(ART_USE_OPTIMIZING_COMPILER),true)
DEX2OAT_FLAGS := --compiler-backend=Optimizing
-DALVIKVM_FLAGS += -Xcompiler-option --compiler-backend=Optimizing
endif
#
@@ -241,13 +231,20 @@ ART_TARGET_CFLAGS += -DART_BASE_ADDRESS_MIN_DELTA=$(LIBART_IMG_TARGET_MIN_BASE_A
ART_TARGET_CFLAGS += -DART_BASE_ADDRESS_MAX_DELTA=$(LIBART_IMG_TARGET_MAX_BASE_ADDRESS_DELTA)
# Colorize clang compiler warnings.
+art_clang_cflags := -fcolor-diagnostics
+
+# Warn if switch fallthroughs aren't annotated.
+art_clang_cflags += -Wimplicit-fallthrough
+
ifeq ($(ART_HOST_CLANG),true)
- ART_HOST_CFLAGS += -fcolor-diagnostics
+ ART_HOST_CFLAGS += $(art_clang_cflags)
endif
ifeq ($(ART_TARGET_CLANG),true)
- ART_TARGET_CFLAGS += -fcolor-diagnostics
+ ART_TARGET_CFLAGS += $(art_clang_cflags)
endif
+art_clang_cflags :=
+
ART_TARGET_LDFLAGS :=
ifeq ($(TARGET_CPU_SMP),true)
ART_TARGET_CFLAGS += -DANDROID_SMP=1
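The -Wimplicit-fallthrough flag enabled above is what drives the FALLTHROUGH_INTENDED (and related UNREACHABLE) annotations that replace "Intentional fall-through" comments throughout the rest of this change. The real definitions live in runtime/base/macros.h, which this commit also touches; the standalone sketch below is an assumption modeled on the common Chromium-style macro, with illustrative function names, not the exact ART code:

// fallthrough_sketch.cc -- minimal sketch, assuming a Chromium-style macro definition.
#include <cstdio>

#if defined(__clang__) && defined(__has_warning)
# if __has_warning("-Wimplicit-fallthrough")
#  define FALLTHROUGH_INTENDED [[clang::fallthrough]]  // Checked by -Wimplicit-fallthrough.
# endif
#endif
#ifndef FALLTHROUGH_INTENDED
# define FALLTHROUGH_INTENDED do { } while (0)  // Harmless no-op on other compilers.
#endif

// With -Wimplicit-fallthrough, an unannotated fall-through between the cases below
// would warn; the macro both documents and silences the intended ones. The shape
// mirrors the OpIT() guide-length switch patched later in this diff.
int CountGuideBits(int guide_len) {
  int bits = 0;
  switch (guide_len) {
    case 3:
      ++bits;
      FALLTHROUGH_INTENDED;
    case 2:
      ++bits;
      FALLTHROUGH_INTENDED;
    case 1:
      ++bits;
      break;
    default:
      break;
  }
  return bits;
}

int main() {
  std::printf("%d\n", CountGuideBits(3));  // Prints 3: all three cases executed.
  return 0;
}

Compiling a file like this with "clang++ -Wimplicit-fallthrough" is a quick way to see the warning the new flag turns on: remove one FALLTHROUGH_INTENDED and the diagnostic appears at that case boundary.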
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 6ef451f8cb..844f58ea47 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -21,7 +21,7 @@
# The main rules to build the default "boot" image are in
# build/core/dex_preopt_libart.mk
-include art/build/Android.common_path.mk
+include art/build/Android.common_build.mk
# Use dex2oat debug version for better error reporting
# $(1): 2ND_ or undefined, 2ND_ for 32-bit host builds.
@@ -31,7 +31,7 @@ define create-core-oat-host-rules
$$($(1)HOST_CORE_IMG_OUT): $$(HOST_CORE_DEX_LOCATIONS) $$(DEX2OAT_DEPENDENCY)
@echo "host dex2oat: $$@ ($$?)"
@mkdir -p $$(dir $$@)
- $$(hide) $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
+ $$(hide) $$(DEX2OAT) $$(DEX2OAT_FLAGS) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
--image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(HOST_CORE_DEX_FILES)) \
$$(addprefix --dex-location=,$$(HOST_CORE_DEX_LOCATIONS)) --oat-file=$$($(1)HOST_CORE_OAT_OUT) \
--oat-location=$$($(1)HOST_CORE_OAT) --image=$$($(1)HOST_CORE_IMG_OUT) \
@@ -54,7 +54,7 @@ define create-core-oat-target-rules
$$($(1)TARGET_CORE_IMG_OUT): $$($(1)TARGET_CORE_DEX_FILES) $$(DEX2OAT_DEPENDENCY)
@echo "target dex2oat: $$@ ($$?)"
@mkdir -p $$(dir $$@)
- $$(hide) $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_XMS) --runtime-arg -Xmx$(DEX2OAT_XMX) \
+ $$(hide) $$(DEX2OAT) $$(DEX2OAT_FLAGS) --runtime-arg -Xms$(DEX2OAT_XMS) --runtime-arg -Xmx$(DEX2OAT_XMX) \
--image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(TARGET_CORE_DEX_FILES)) \
$$(addprefix --dex-location=,$$(TARGET_CORE_DEX_LOCATIONS)) --oat-file=$$($(1)TARGET_CORE_OAT_OUT) \
--oat-location=$$($(1)TARGET_CORE_OAT) --image=$$($(1)TARGET_CORE_IMG_OUT) \
diff --git a/compiler/buffered_output_stream.h b/compiler/buffered_output_stream.h
index 75a3f24c70..bbc49df6d2 100644
--- a/compiler/buffered_output_stream.h
+++ b/compiler/buffered_output_stream.h
@@ -23,7 +23,7 @@
namespace art {
-class BufferedOutputStream : public OutputStream {
+class BufferedOutputStream FINAL : public OutputStream {
public:
explicit BufferedOutputStream(OutputStream* out);
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index fbaed9ffab..e3eb9e9915 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -144,12 +144,12 @@ CommonCompilerTest::~CommonCompilerTest() {}
OatFile::OatMethod CommonCompilerTest::CreateOatMethod(const void* code, const uint8_t* gc_map) {
CHECK(code != nullptr);
- const byte* base;
+ const uint8_t* base;
uint32_t code_offset, gc_map_offset;
if (gc_map == nullptr) {
- base = reinterpret_cast<const byte*>(code); // Base of data points at code.
- base -= kPointerSize; // Move backward so that code_offset != 0.
- code_offset = kPointerSize;
+ base = reinterpret_cast<const uint8_t*>(code); // Base of data points at code.
+ base -= sizeof(void*); // Move backward so that code_offset != 0.
+ code_offset = sizeof(void*);
gc_map_offset = 0;
} else {
// TODO: 64bit support.
@@ -398,7 +398,7 @@ void CommonCompilerTest::ReserveImageSpace() {
// accidentally end up colliding with the fixed memory address when we need to load the image.
std::string error_msg;
image_reservation_.reset(MemMap::MapAnonymous("image reservation",
- reinterpret_cast<byte*>(ART_BASE_ADDRESS),
+ reinterpret_cast<uint8_t*>(ART_BASE_ADDRESS),
(size_t)100 * 1024 * 1024, // 100MB
PROT_NONE,
false /* no need for 4gb flag with fixed mmap*/,
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
index b2c348bef1..fce23bc2fb 100644
--- a/compiler/dex/bb_optimizations.h
+++ b/compiler/dex/bb_optimizations.h
@@ -137,20 +137,20 @@ class CodeLayout : public PassME {
};
/**
- * @class NullCheckEliminationAndTypeInference
- * @brief Null check elimination and type inference.
+ * @class NullCheckElimination
+ * @brief Null check elimination pass.
*/
-class NullCheckEliminationAndTypeInference : public PassME {
+class NullCheckElimination : public PassME {
public:
- NullCheckEliminationAndTypeInference()
- : PassME("NCE_TypeInference", kRepeatingTopologicalSortTraversal, "4_post_nce_cfg") {
+ NullCheckElimination()
+ : PassME("NCE", kRepeatingTopologicalSortTraversal, "3_post_nce_cfg") {
}
- void Start(PassDataHolder* data) const {
+ bool Gate(const PassDataHolder* data) const {
DCHECK(data != nullptr);
- CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
DCHECK(c_unit != nullptr);
- c_unit->mir_graph->EliminateNullChecksAndInferTypesStart();
+ return c_unit->mir_graph->EliminateNullChecksGate();
}
bool Worker(PassDataHolder* data) const {
@@ -160,14 +160,35 @@ class NullCheckEliminationAndTypeInference : public PassME {
DCHECK(c_unit != nullptr);
BasicBlock* bb = pass_me_data_holder->bb;
DCHECK(bb != nullptr);
- return c_unit->mir_graph->EliminateNullChecksAndInferTypes(bb);
+ return c_unit->mir_graph->EliminateNullChecks(bb);
}
void End(PassDataHolder* data) const {
DCHECK(data != nullptr);
CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
DCHECK(c_unit != nullptr);
- c_unit->mir_graph->EliminateNullChecksAndInferTypesEnd();
+ c_unit->mir_graph->EliminateNullChecksEnd();
+ }
+};
+
+/**
+ * @class TypeInference
+ * @brief Type inference pass.
+ */
+class TypeInference : public PassME {
+ public:
+ TypeInference()
+ : PassME("TypeInference", kRepeatingTopologicalSortTraversal, "4_post_type_cfg") {
+ }
+
+ bool Worker(PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
+ CompilationUnit* c_unit = pass_me_data_holder->c_unit;
+ DCHECK(c_unit != nullptr);
+ BasicBlock* bb = pass_me_data_holder->bb;
+ DCHECK(bb != nullptr);
+ return c_unit->mir_graph->InferTypes(bb);
}
};
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index e411164f3a..eb9891606c 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -1460,7 +1460,7 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
uint16_t reg = GetOperandValue(mir->ssa_rep->uses[0]);
HandleNullCheck(mir, reg);
}
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case Instruction::INVOKE_STATIC:
case Instruction::INVOKE_STATIC_RANGE:
// Make ref args aliasing.
@@ -1583,7 +1583,7 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
uint16_t reg = GetOperandValue(mir->ssa_rep->uses[0]);
HandleNullCheck(mir, reg);
}
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case Instruction::NEG_INT:
case Instruction::NOT_INT:
case Instruction::NEG_FLOAT:
@@ -1610,7 +1610,6 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
}
break;
-
case Instruction::DOUBLE_TO_LONG:
case Instruction::LONG_TO_DOUBLE:
case Instruction::NEG_LONG:
@@ -1782,7 +1781,7 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::APUT_OBJECT:
HandlePutObject(mir);
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case Instruction::APUT:
case Instruction::APUT_WIDE:
case Instruction::APUT_BYTE:
@@ -1804,7 +1803,7 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::IPUT_OBJECT:
HandlePutObject(mir);
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case Instruction::IPUT:
case Instruction::IPUT_WIDE:
case Instruction::IPUT_BOOLEAN:
@@ -1826,7 +1825,7 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::SPUT_OBJECT:
HandlePutObject(mir);
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case Instruction::SPUT:
case Instruction::SPUT_WIDE:
case Instruction::SPUT_BOOLEAN:
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 276b886de0..f0c9858627 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -94,11 +94,11 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
topological_order_indexes_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
topological_order_loop_head_stack_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
i_dom_list_(NULL),
- def_block_matrix_(NULL),
temp_scoped_alloc_(),
temp_insn_data_(nullptr),
temp_bit_vector_size_(0u),
temp_bit_vector_(nullptr),
+ temp_bit_matrix_(nullptr),
temp_gvn_(),
block_list_(arena->Adapter(kArenaAllocBBList)),
try_block_addr_(NULL),
@@ -352,7 +352,7 @@ void MIRGraph::ProcessTryCatchBlocks() {
}
// Iterate over each of the handlers to enqueue the empty Catch blocks.
- const byte* handlers_ptr = DexFile::GetCatchHandlerData(*current_code_item_, 0);
+ const uint8_t* handlers_ptr = DexFile::GetCatchHandlerData(*current_code_item_, 0);
uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
for (uint32_t idx = 0; idx < handlers_size; idx++) {
CatchHandlerIterator iterator(handlers_ptr);
@@ -391,7 +391,7 @@ bool MIRGraph::IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset,
switch (check_insn->Opcode()) {
case Instruction::MOVE_WIDE:
wide = true;
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case Instruction::MOVE_OBJECT:
case Instruction::MOVE:
dest = check_insn->VRegA_12x();
@@ -399,7 +399,7 @@ bool MIRGraph::IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset,
case Instruction::MOVE_WIDE_FROM16:
wide = true;
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case Instruction::MOVE_OBJECT_FROM16:
case Instruction::MOVE_FROM16:
dest = check_insn->VRegA_22x();
@@ -407,7 +407,7 @@ bool MIRGraph::IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset,
case Instruction::MOVE_WIDE_16:
wide = true;
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case Instruction::MOVE_OBJECT_16:
case Instruction::MOVE_16:
dest = check_insn->VRegA_32x();
@@ -417,7 +417,7 @@ bool MIRGraph::IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset,
case Instruction::GOTO_16:
case Instruction::GOTO_32:
check_insn = check_insn->RelativeAt(check_insn->GetTargetOffset());
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
default:
return check_insn->Opcode() == Instruction::MONITOR_EXIT &&
check_insn->VRegA_11x() == monitor_reg;
@@ -1706,6 +1706,7 @@ void MIRGraph::SSATransformationEnd() {
temp_bit_vector_size_ = 0u;
temp_bit_vector_ = nullptr;
+ temp_bit_matrix_ = nullptr; // Def block matrix.
DCHECK(temp_scoped_alloc_.get() != nullptr);
temp_scoped_alloc_.reset();
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index fe6fb75151..cc215bde06 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -198,9 +198,7 @@ struct BasicBlockDataFlow {
ArenaBitVector* use_v;
ArenaBitVector* def_v;
ArenaBitVector* live_in_v;
- ArenaBitVector* phi_v;
int32_t* vreg_to_ssa_map_exit;
- ArenaBitVector* ending_check_v; // For null check and class init check elimination.
};
/*
@@ -1022,9 +1020,10 @@ class MIRGraph {
int SRegToVReg(int ssa_reg) const;
void VerifyDataflow();
void CheckForDominanceFrontier(BasicBlock* dom_bb, const BasicBlock* succ_bb);
- void EliminateNullChecksAndInferTypesStart();
- bool EliminateNullChecksAndInferTypes(BasicBlock* bb);
- void EliminateNullChecksAndInferTypesEnd();
+ bool EliminateNullChecksGate();
+ bool EliminateNullChecks(BasicBlock* bb);
+ void EliminateNullChecksEnd();
+ bool InferTypes(BasicBlock* bb);
bool EliminateClassInitChecksGate();
bool EliminateClassInitChecks(BasicBlock* bb);
void EliminateClassInitChecksEnd();
@@ -1260,11 +1259,15 @@ class MIRGraph {
// Stack of the loop head indexes and recalculation flags for RepeatingTopologicalSortIterator.
ArenaVector<std::pair<uint16_t, bool>> topological_order_loop_head_stack_;
int* i_dom_list_;
- ArenaBitVector** def_block_matrix_; // original num registers x num_blocks.
std::unique_ptr<ScopedArenaAllocator> temp_scoped_alloc_;
uint16_t* temp_insn_data_;
uint32_t temp_bit_vector_size_;
ArenaBitVector* temp_bit_vector_;
+ // temp_bit_matrix_ used as one of
+ // - def_block_matrix: original num registers x num_blocks_,
+ // - ending_null_check_matrix: num_blocks_ x original num registers,
+ // - ending_clinit_check_matrix: num_blocks_ x unique class count.
+ ArenaBitVector** temp_bit_matrix_;
std::unique_ptr<GlobalValueNumbering> temp_gvn_;
static const int kInvalidEntry = -1;
ArenaVector<BasicBlock*> block_list_;
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 35dae00c96..322b737677 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -819,96 +819,91 @@ void MIRGraph::CombineBlocks(struct BasicBlock* bb) {
}
}
-void MIRGraph::EliminateNullChecksAndInferTypesStart() {
- if ((cu_->disable_opt & (1 << kNullCheckElimination)) == 0) {
- if (kIsDebugBuild) {
- AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
- CHECK(bb->data_flow_info == nullptr || bb->data_flow_info->ending_check_v == nullptr);
- }
- }
-
- DCHECK(temp_scoped_alloc_.get() == nullptr);
- temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
- temp_bit_vector_size_ = GetNumSSARegs();
- temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
- temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapTempSSARegisterV);
+bool MIRGraph::EliminateNullChecksGate() {
+ if ((cu_->disable_opt & (1 << kNullCheckElimination)) != 0 ||
+ (merged_df_flags_ & DF_HAS_NULL_CHKS) == 0) {
+ return false;
}
+
+ DCHECK(temp_scoped_alloc_.get() == nullptr);
+ temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
+ temp_bit_vector_size_ = GetNumSSARegs();
+ temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
+ temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapNullCheck);
+ temp_bit_matrix_ = static_cast<ArenaBitVector**>(
+ temp_scoped_alloc_->Alloc(sizeof(ArenaBitVector*) * GetNumBlocks(), kArenaAllocMisc));
+ std::fill_n(temp_bit_matrix_, GetNumBlocks(), nullptr);
+ return true;
}
/*
- * Eliminate unnecessary null checks for a basic block. Also, while we're doing
- * an iterative walk go ahead and perform type and size inference.
+ * Eliminate unnecessary null checks for a basic block.
*/
-bool MIRGraph::EliminateNullChecksAndInferTypes(BasicBlock* bb) {
- if (bb->data_flow_info == NULL) return false;
- bool infer_changed = false;
- bool do_nce = ((cu_->disable_opt & (1 << kNullCheckElimination)) == 0);
+bool MIRGraph::EliminateNullChecks(BasicBlock* bb) {
+ if (bb->data_flow_info == nullptr) return false;
ArenaBitVector* ssa_regs_to_check = temp_bit_vector_;
- if (do_nce) {
- /*
- * Set initial state. Catch blocks don't need any special treatment.
- */
- if (bb->block_type == kEntryBlock) {
- ssa_regs_to_check->ClearAllBits();
- // Assume all ins are objects.
- for (uint16_t in_reg = GetFirstInVR();
- in_reg < GetNumOfCodeVRs(); in_reg++) {
- ssa_regs_to_check->SetBit(in_reg);
- }
- if ((cu_->access_flags & kAccStatic) == 0) {
- // If non-static method, mark "this" as non-null
- int this_reg = GetFirstInVR();
- ssa_regs_to_check->ClearBit(this_reg);
- }
- } else if (bb->predecessors.size() == 1) {
- BasicBlock* pred_bb = GetBasicBlock(bb->predecessors[0]);
- // pred_bb must have already been processed at least once.
- DCHECK(pred_bb->data_flow_info->ending_check_v != nullptr);
- ssa_regs_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
- if (pred_bb->block_type == kDalvikByteCode) {
- // Check to see if predecessor had an explicit null-check.
- MIR* last_insn = pred_bb->last_mir_insn;
- if (last_insn != nullptr) {
- Instruction::Code last_opcode = last_insn->dalvikInsn.opcode;
- if (last_opcode == Instruction::IF_EQZ) {
- if (pred_bb->fall_through == bb->id) {
- // The fall-through of a block following a IF_EQZ, set the vA of the IF_EQZ to show that
- // it can't be null.
- ssa_regs_to_check->ClearBit(last_insn->ssa_rep->uses[0]);
- }
- } else if (last_opcode == Instruction::IF_NEZ) {
- if (pred_bb->taken == bb->id) {
- // The taken block following a IF_NEZ, set the vA of the IF_NEZ to show that it can't be
- // null.
- ssa_regs_to_check->ClearBit(last_insn->ssa_rep->uses[0]);
- }
+ /*
+ * Set initial state. Catch blocks don't need any special treatment.
+ */
+ if (bb->block_type == kEntryBlock) {
+ ssa_regs_to_check->ClearAllBits();
+ // Assume all ins are objects.
+ for (uint16_t in_reg = GetFirstInVR();
+ in_reg < GetNumOfCodeVRs(); in_reg++) {
+ ssa_regs_to_check->SetBit(in_reg);
+ }
+ if ((cu_->access_flags & kAccStatic) == 0) {
+ // If non-static method, mark "this" as non-null
+ int this_reg = GetFirstInVR();
+ ssa_regs_to_check->ClearBit(this_reg);
+ }
+ } else if (bb->predecessors.size() == 1) {
+ BasicBlock* pred_bb = GetBasicBlock(bb->predecessors[0]);
+ // pred_bb must have already been processed at least once.
+ DCHECK(temp_bit_matrix_[pred_bb->id] != nullptr);
+ ssa_regs_to_check->Copy(temp_bit_matrix_[pred_bb->id]);
+ if (pred_bb->block_type == kDalvikByteCode) {
+ // Check to see if predecessor had an explicit null-check.
+ MIR* last_insn = pred_bb->last_mir_insn;
+ if (last_insn != nullptr) {
+ Instruction::Code last_opcode = last_insn->dalvikInsn.opcode;
+ if (last_opcode == Instruction::IF_EQZ) {
+ if (pred_bb->fall_through == bb->id) {
+ // The fall-through of a block following a IF_EQZ, set the vA of the IF_EQZ to show that
+ // it can't be null.
+ ssa_regs_to_check->ClearBit(last_insn->ssa_rep->uses[0]);
+ }
+ } else if (last_opcode == Instruction::IF_NEZ) {
+ if (pred_bb->taken == bb->id) {
+ // The taken block following a IF_NEZ, set the vA of the IF_NEZ to show that it can't be
+ // null.
+ ssa_regs_to_check->ClearBit(last_insn->ssa_rep->uses[0]);
}
}
}
- } else {
- // Starting state is union of all incoming arcs
- bool copied_first = false;
- for (BasicBlockId pred_id : bb->predecessors) {
- BasicBlock* pred_bb = GetBasicBlock(pred_id);
- DCHECK(pred_bb != nullptr);
- DCHECK(pred_bb->data_flow_info != nullptr);
- if (pred_bb->data_flow_info->ending_check_v == nullptr) {
- continue;
- }
- if (!copied_first) {
- copied_first = true;
- ssa_regs_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
- } else {
- ssa_regs_to_check->Union(pred_bb->data_flow_info->ending_check_v);
- }
+ }
+ } else {
+ // Starting state is union of all incoming arcs
+ bool copied_first = false;
+ for (BasicBlockId pred_id : bb->predecessors) {
+ BasicBlock* pred_bb = GetBasicBlock(pred_id);
+ DCHECK(pred_bb != nullptr);
+ DCHECK(pred_bb->data_flow_info != nullptr);
+ if (temp_bit_matrix_[pred_bb->id] == nullptr) {
+ continue;
+ }
+ if (!copied_first) {
+ copied_first = true;
+ ssa_regs_to_check->Copy(temp_bit_matrix_[pred_bb->id]);
+ } else {
+ ssa_regs_to_check->Union(temp_bit_matrix_[pred_bb->id]);
}
- DCHECK(copied_first); // At least one predecessor must have been processed before this bb.
}
- // At this point, ssa_regs_to_check shows which sregs have an object definition with
- // no intervening uses.
+ DCHECK(copied_first); // At least one predecessor must have been processed before this bb.
}
+ // At this point, ssa_regs_to_check shows which sregs have an object definition with
+ // no intervening uses.
// Walk through the instruction in the block, updating as necessary
for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
@@ -916,12 +911,6 @@ bool MIRGraph::EliminateNullChecksAndInferTypes(BasicBlock* bb) {
continue;
}
- // Propagate type info.
- infer_changed = InferTypeAndSize(bb, mir, infer_changed);
- if (!do_nce) {
- continue;
- }
-
uint64_t df_attributes = GetDataFlowAttributes(mir);
// Might need a null check?
@@ -1022,35 +1011,48 @@ bool MIRGraph::EliminateNullChecksAndInferTypes(BasicBlock* bb) {
// Did anything change?
bool nce_changed = false;
- if (do_nce) {
- if (bb->data_flow_info->ending_check_v == nullptr) {
- DCHECK(temp_scoped_alloc_.get() != nullptr);
- bb->data_flow_info->ending_check_v = new (temp_scoped_alloc_.get()) ArenaBitVector(
- temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapNullCheck);
- nce_changed = ssa_regs_to_check->GetHighestBitSet() != -1;
- bb->data_flow_info->ending_check_v->Copy(ssa_regs_to_check);
- } else if (!ssa_regs_to_check->SameBitsSet(bb->data_flow_info->ending_check_v)) {
- nce_changed = true;
- bb->data_flow_info->ending_check_v->Copy(ssa_regs_to_check);
- }
+ ArenaBitVector* old_ending_ssa_regs_to_check = temp_bit_matrix_[bb->id];
+ if (old_ending_ssa_regs_to_check == nullptr) {
+ DCHECK(temp_scoped_alloc_.get() != nullptr);
+ nce_changed = ssa_regs_to_check->GetHighestBitSet() != -1;
+ temp_bit_matrix_[bb->id] = ssa_regs_to_check;
+ // Create a new ssa_regs_to_check for next BB.
+ temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
+ temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapNullCheck);
+ } else if (!ssa_regs_to_check->SameBitsSet(old_ending_ssa_regs_to_check)) {
+ nce_changed = true;
+ temp_bit_matrix_[bb->id] = ssa_regs_to_check;
+ temp_bit_vector_ = old_ending_ssa_regs_to_check; // Reuse for ssa_regs_to_check for next BB.
}
- return infer_changed | nce_changed;
+ return nce_changed;
}
-void MIRGraph::EliminateNullChecksAndInferTypesEnd() {
- if ((cu_->disable_opt & (1 << kNullCheckElimination)) == 0) {
- // Clean up temporaries.
- temp_bit_vector_size_ = 0u;
- temp_bit_vector_ = nullptr;
- AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
- if (bb->data_flow_info != nullptr) {
- bb->data_flow_info->ending_check_v = nullptr;
- }
+void MIRGraph::EliminateNullChecksEnd() {
+ // Clean up temporaries.
+ temp_bit_vector_size_ = 0u;
+ temp_bit_vector_ = nullptr;
+ temp_bit_matrix_ = nullptr;
+ DCHECK(temp_scoped_alloc_.get() != nullptr);
+ temp_scoped_alloc_.reset();
+}
+
+/*
+ * Perform type and size inference for a basic block.
+ */
+bool MIRGraph::InferTypes(BasicBlock* bb) {
+ if (bb->data_flow_info == nullptr) return false;
+
+ bool infer_changed = false;
+ for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ if (mir->ssa_rep == NULL) {
+ continue;
}
- DCHECK(temp_scoped_alloc_.get() != nullptr);
- temp_scoped_alloc_.reset();
+
+ // Propagate type info.
+ infer_changed = InferTypeAndSize(bb, mir, infer_changed);
}
+
+ return infer_changed;
}
bool MIRGraph::EliminateClassInitChecksGate() {
@@ -1059,13 +1061,6 @@ bool MIRGraph::EliminateClassInitChecksGate() {
return false;
}
- if (kIsDebugBuild) {
- AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
- CHECK(bb->data_flow_info == nullptr || bb->data_flow_info->ending_check_v == nullptr);
- }
- }
-
DCHECK(temp_scoped_alloc_.get() == nullptr);
temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
@@ -1139,6 +1134,9 @@ bool MIRGraph::EliminateClassInitChecksGate() {
temp_bit_vector_size_ = unique_class_count;
temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapClInitCheck);
+ temp_bit_matrix_ = static_cast<ArenaBitVector**>(
+ temp_scoped_alloc_->Alloc(sizeof(ArenaBitVector*) * GetNumBlocks(), kArenaAllocMisc));
+ std::fill_n(temp_bit_matrix_, GetNumBlocks(), nullptr);
DCHECK_GT(temp_bit_vector_size_, 0u);
return true;
}
@@ -1148,7 +1146,7 @@ bool MIRGraph::EliminateClassInitChecksGate() {
*/
bool MIRGraph::EliminateClassInitChecks(BasicBlock* bb) {
DCHECK_EQ((cu_->disable_opt & (1 << kClassInitCheckElimination)), 0u);
- if (bb->data_flow_info == NULL) {
+ if (bb->data_flow_info == nullptr) {
return false;
}
@@ -1164,8 +1162,8 @@ bool MIRGraph::EliminateClassInitChecks(BasicBlock* bb) {
// pred_bb must have already been processed at least once.
DCHECK(pred_bb != nullptr);
DCHECK(pred_bb->data_flow_info != nullptr);
- DCHECK(pred_bb->data_flow_info->ending_check_v != nullptr);
- classes_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
+ DCHECK(temp_bit_matrix_[pred_bb->id] != nullptr);
+ classes_to_check->Copy(temp_bit_matrix_[pred_bb->id]);
} else {
// Starting state is union of all incoming arcs.
bool copied_first = false;
@@ -1173,14 +1171,14 @@ bool MIRGraph::EliminateClassInitChecks(BasicBlock* bb) {
BasicBlock* pred_bb = GetBasicBlock(pred_id);
DCHECK(pred_bb != nullptr);
DCHECK(pred_bb->data_flow_info != nullptr);
- if (pred_bb->data_flow_info->ending_check_v == nullptr) {
+ if (temp_bit_matrix_[pred_bb->id] == nullptr) {
continue;
}
if (!copied_first) {
copied_first = true;
- classes_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
+ classes_to_check->Copy(temp_bit_matrix_[pred_bb->id]);
} else {
- classes_to_check->Union(pred_bb->data_flow_info->ending_check_v);
+ classes_to_check->Union(temp_bit_matrix_[pred_bb->id]);
}
}
DCHECK(copied_first); // At least one predecessor must have been processed before this bb.
@@ -1211,16 +1209,18 @@ bool MIRGraph::EliminateClassInitChecks(BasicBlock* bb) {
// Did anything change?
bool changed = false;
- if (bb->data_flow_info->ending_check_v == nullptr) {
+ ArenaBitVector* old_ending_classes_to_check = temp_bit_matrix_[bb->id];
+ if (old_ending_classes_to_check == nullptr) {
DCHECK(temp_scoped_alloc_.get() != nullptr);
- DCHECK(bb->data_flow_info != nullptr);
- bb->data_flow_info->ending_check_v = new (temp_scoped_alloc_.get()) ArenaBitVector(
- temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapClInitCheck);
changed = classes_to_check->GetHighestBitSet() != -1;
- bb->data_flow_info->ending_check_v->Copy(classes_to_check);
- } else if (!classes_to_check->Equal(bb->data_flow_info->ending_check_v)) {
+ temp_bit_matrix_[bb->id] = classes_to_check;
+ // Create a new classes_to_check for next BB.
+ temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
+ temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapClInitCheck);
+ } else if (!classes_to_check->Equal(old_ending_classes_to_check)) {
changed = true;
- bb->data_flow_info->ending_check_v->Copy(classes_to_check);
+ temp_bit_matrix_[bb->id] = classes_to_check;
+ temp_bit_vector_ = old_ending_classes_to_check; // Reuse for classes_to_check for next BB.
}
return changed;
}
@@ -1229,13 +1229,7 @@ void MIRGraph::EliminateClassInitChecksEnd() {
// Clean up temporaries.
temp_bit_vector_size_ = 0u;
temp_bit_vector_ = nullptr;
- AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
- if (bb->data_flow_info != nullptr) {
- bb->data_flow_info->ending_check_v = nullptr;
- }
- }
-
+ temp_bit_matrix_ = nullptr;
DCHECK(temp_insn_data_ != nullptr);
temp_insn_data_ = nullptr;
DCHECK(temp_scoped_alloc_.get() != nullptr);
diff --git a/compiler/dex/pass_driver_me_opts.cc b/compiler/dex/pass_driver_me_opts.cc
index 628106283b..cd3ffd4cc8 100644
--- a/compiler/dex/pass_driver_me_opts.cc
+++ b/compiler/dex/pass_driver_me_opts.cc
@@ -37,7 +37,8 @@ const Pass* const PassDriver<PassDriverMEOpts>::g_passes[] = {
GetPassInstance<CacheMethodLoweringInfo>(),
GetPassInstance<SpecialMethodInliner>(),
GetPassInstance<CodeLayout>(),
- GetPassInstance<NullCheckEliminationAndTypeInference>(),
+ GetPassInstance<NullCheckElimination>(),
+ GetPassInstance<TypeInference>(),
GetPassInstance<ClassInitCheckElimination>(),
GetPassInstance<GlobalValueNumberingPass>(),
GetPassInstance<BBCombine>(),
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index cf34948969..4e20d76604 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -1265,7 +1265,7 @@ void ArmMir2Lir::AssembleLIR() {
if (lir->operands[1] != rs_r15pc.GetReg()) {
break;
}
- // NOTE: intentional fallthrough.
+ FALLTHROUGH_INTENDED;
case kFixupLoad: {
/*
* PC-relative loads are mostly used to load immediates
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index eb80b4e9e2..018dc1c0c6 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -49,12 +49,13 @@ LIR* ArmMir2Lir::OpIT(ConditionCode ccode, const char* guide) {
int cond_bit = code & 1;
int alt_bit = cond_bit ^ 1;
- // Note: case fallthroughs intentional
switch (strlen(guide)) {
case 3:
mask1 = (guide[2] == 'T') ? cond_bit : alt_bit;
+ FALLTHROUGH_INTENDED;
case 2:
mask2 = (guide[1] == 'T') ? cond_bit : alt_bit;
+ FALLTHROUGH_INTENDED;
case 1:
mask3 = (guide[0] == 'T') ? cond_bit : alt_bit;
break;
@@ -62,6 +63,7 @@ LIR* ArmMir2Lir::OpIT(ConditionCode ccode, const char* guide) {
break;
default:
LOG(FATAL) << "OAT: bad case in OpIT";
+ UNREACHABLE();
}
mask = (mask3 << 3) | (mask2 << 2) | (mask1 << 1) |
(1 << (3 - strlen(guide)));
@@ -77,12 +79,13 @@ void ArmMir2Lir::UpdateIT(LIR* it, const char* new_guide) {
int cond_bit = code & 1;
int alt_bit = cond_bit ^ 1;
- // Note: case fallthroughs intentional
switch (strlen(new_guide)) {
case 3:
mask1 = (new_guide[2] == 'T') ? cond_bit : alt_bit;
+ FALLTHROUGH_INTENDED;
case 2:
mask2 = (new_guide[1] == 'T') ? cond_bit : alt_bit;
+ FALLTHROUGH_INTENDED;
case 1:
mask3 = (new_guide[0] == 'T') ? cond_bit : alt_bit;
break;
@@ -90,6 +93,7 @@ void ArmMir2Lir::UpdateIT(LIR* it, const char* new_guide) {
break;
default:
LOG(FATAL) << "OAT: bad case in UpdateIT";
+ UNREACHABLE();
}
mask = (mask3 << 3) | (mask2 << 2) | (mask1 << 1) |
(1 << (3 - strlen(new_guide)));
@@ -1170,7 +1174,7 @@ void ArmMir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
* overlap with either operand and send that case to a runtime handler.
*/
RegLocation rl_result;
- if (BadOverlap(rl_src1, rl_dest) || (BadOverlap(rl_src2, rl_dest))) {
+ if (PartiallyIntersects(rl_src1, rl_dest) || (PartiallyIntersects(rl_src2, rl_dest))) {
FlushAllRegs();
CallRuntimeHelperRegLocationRegLocation(kQuickLmul, rl_src1, rl_src2, false);
rl_result = GetReturnWide(kCoreReg);
@@ -1468,7 +1472,7 @@ void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
StoreValueWide(rl_dest, rl_src);
return;
}
- if (BadOverlap(rl_src, rl_dest)) {
+ if (PartiallyIntersects(rl_src, rl_dest)) {
GenShiftOpLong(opcode, rl_dest, rl_src, rl_shift);
return;
}
@@ -1547,7 +1551,7 @@ void ArmMir2Lir::GenArithImmOpLong(Instruction::Code opcode,
std::swap(rl_src1, rl_src2);
}
}
- if (BadOverlap(rl_src1, rl_dest)) {
+ if (PartiallyIntersects(rl_src1, rl_dest)) {
GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
return;
}
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index bba1a8c65b..e833c9a629 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -494,7 +494,7 @@ LIR* ArmMir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, in
(value <= 1020) && ((value & 0x3) == 0)) {
return NewLIR3(kThumbAddPcRel, r_dest.GetReg(), r_src1.GetReg(), value >> 2);
}
- // Note: intentional fallthrough
+ FALLTHROUGH_INTENDED;
case kOpSub:
if (all_low_regs && ((abs_value & 0x7) == abs_value)) {
if (op == kOpAdd)
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 7c663a9418..e2ff090293 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -705,16 +705,16 @@ uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
switch (kind) {
case kFmtRegX:
want_64_bit = true;
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case kFmtRegW:
want_var_size = false;
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case kFmtRegR:
want_zero = true;
break;
case kFmtRegXOrSp:
want_64_bit = true;
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case kFmtRegWOrSp:
want_var_size = false;
break;
@@ -722,10 +722,10 @@ uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
break;
case kFmtRegD:
want_64_bit = true;
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case kFmtRegS:
want_var_size = false;
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case kFmtRegF:
want_float = true;
break;
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index 38670ff8be..0883694033 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -833,7 +833,7 @@ LIR* Arm64Mir2Lir::OpRegRegImm64(OpKind op, RegStorage r_dest, RegStorage r_src1
value);
case kOpAdd:
neg = !neg;
- // Note: intentional fallthrough
+ FALLTHROUGH_INTENDED;
case kOpSub:
// Add and sub below read/write sp rather than xzr.
if (abs_value < 0x1000) {
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index e18116ec3c..80a1ac4c52 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -1185,12 +1185,18 @@ int32_t Mir2Lir::LowestSetBit(uint64_t x) {
return bit_posn;
}
-bool Mir2Lir::BadOverlap(RegLocation rl_src, RegLocation rl_dest) {
+bool Mir2Lir::PartiallyIntersects(RegLocation rl_src, RegLocation rl_dest) {
DCHECK(rl_src.wide);
DCHECK(rl_dest.wide);
return (abs(mir_graph_->SRegToVReg(rl_src.s_reg_low) - mir_graph_->SRegToVReg(rl_dest.s_reg_low)) == 1);
}
+bool Mir2Lir::Intersects(RegLocation rl_src, RegLocation rl_dest) {
+ DCHECK(rl_src.wide);
+ DCHECK(rl_dest.wide);
+ return (abs(mir_graph_->SRegToVReg(rl_src.s_reg_low) - mir_graph_->SRegToVReg(rl_dest.s_reg_low)) <= 1);
+}
+
LIR *Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
int offset, int check_value, LIR* target, LIR** compare) {
// Handle this for architectures that can't compare to memory.
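The distinction between the two predicates added above is subtle: both operate on wide values that occupy a pair of adjacent virtual registers, and the only difference is whether a distance of zero between the low VRs (full overlap) counts. A standalone restatement of that logic on plain VR numbers follows; the helper names and integer parameters are illustrative only, since the real methods take RegLocations and map SSA registers through SRegToVReg():

// intersects_sketch.cc -- worked example of PartiallyIntersects vs. Intersects.
#include <cassert>
#include <cstdlib>

// A wide value uses two consecutive VRs: low_vreg and low_vreg + 1.

// Misaligned by exactly one VR, e.g. {v5,v6} vs {v4,v5} -- shares a single VR.
bool PartiallyIntersects(int src_low_vreg, int dest_low_vreg) {
  return std::abs(src_low_vreg - dest_low_vreg) == 1;
}

// Shares at least one VR: misaligned by one, or the same pair entirely.
bool Intersects(int src_low_vreg, int dest_low_vreg) {
  return std::abs(src_low_vreg - dest_low_vreg) <= 1;
}

int main() {
  assert(PartiallyIntersects(5, 4));   // {v5,v6} vs {v4,v5}: share only v5.
  assert(!PartiallyIntersects(4, 4));  // Identical pairs: full overlap, not "partial".
  assert(Intersects(4, 4));            // Full overlap still intersects.
  assert(!Intersects(6, 4));           // {v6,v7} vs {v4,v5}: disjoint.
  return 0;
}

This is why the x86 GenLongArith change later in the diff checks Intersects() for the src/dest-overlap slow path while the shift and multiply paths keep using the stricter PartiallyIntersects() test.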
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 3a3821f800..12ca065de4 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -1785,7 +1785,7 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re
case Instruction::SUB_INT:
case Instruction::SUB_INT_2ADDR:
lit = -lit;
- // Intended fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::ADD_INT:
case Instruction::ADD_INT_2ADDR:
case Instruction::ADD_INT_LIT8:
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 408c73d1b0..2bef7c53c5 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -552,7 +552,8 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
} else {
break;
}
- // Intentional fallthrough for x86
+ DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
+ FALLTHROUGH_INTENDED;
default:
return -1;
}
@@ -596,7 +597,8 @@ static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) {
break; // kInvokeTgt := kArg0->entrypoint
}
- // Intentional fallthrough for X86
+ DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
+ FALLTHROUGH_INTENDED;
default:
return -1;
}
@@ -641,7 +643,8 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) {
break; // kInvokeTgt := kArg0->entrypoint
}
- // Intentional fallthrough for X86
+ DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
+ FALLTHROUGH_INTENDED;
default:
return -1;
}
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index c7e9190ed9..01d1a1e0db 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -465,6 +465,7 @@ void MipsMir2Lir::ConvertShortToLongBranch(LIR* lir) {
switch (opcode) {
case kMipsBal:
LOG(FATAL) << "long branch and link unsupported";
+ UNREACHABLE();
case kMipsB:
unconditional = true;
break;
@@ -478,6 +479,7 @@ void MipsMir2Lir::ConvertShortToLongBranch(LIR* lir) {
case kMipsBnez: opcode = kMipsBeqz; break;
default:
LOG(FATAL) << "Unexpected branch kind " << opcode;
+ UNREACHABLE();
}
LIR* hop_target = NULL;
if (!unconditional) {
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 0ac1299c5a..4399981272 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -482,7 +482,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
case Instruction::RETURN_OBJECT:
DCHECK(rl_src[0].ref);
- // Intentional fallthrough.
+ FALLTHROUGH_INTENDED;
case Instruction::RETURN:
if (!kLeafOptimization || !mir_graph_->MethodIsLeaf()) {
GenSuspendTest(opt_flags);
@@ -1031,8 +1031,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
GenArithImmOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
break;
}
- // Note: intentional fallthrough.
-
+ FALLTHROUGH_INTENDED;
case Instruction::MUL_LONG:
case Instruction::DIV_LONG:
case Instruction::REM_LONG:
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 858fb1c138..ea93bbe7f2 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -1487,7 +1487,18 @@ class Mir2Lir : public Backend {
* is not usual for dx to generate, but it is legal (for now). In a future rev of
* dex, we'll want to make this case illegal.
*/
- bool BadOverlap(RegLocation rl_op1, RegLocation rl_op2);
+ bool PartiallyIntersects(RegLocation rl_op1, RegLocation rl_op2);
+
+ /*
+ * @brief Do these SRs intersect?
+ * @param rl_op1 One RegLocation
+ * @param rl_op2 The other RegLocation
+ * @return 'true' if the VR pairs intersect
+ *
+ * Check to see if a result pair has misaligned overlap or
+ * full overlap with an operand pair.
+ */
+ bool Intersects(RegLocation rl_op1, RegLocation rl_op2);
/*
* @brief Force a location (in a register) into a temporary register
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 0902f3cfa2..07034cb8d7 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -1596,7 +1596,7 @@ bool X86Mir2Lir::GenMulLongConst(RegLocation rl_dest, RegLocation rl_src1, int64
return true;
} else if (IsPowerOfTwo(val)) {
int shift_amount = LowestSetBit(val);
- if (!BadOverlap(rl_src1, rl_dest)) {
+ if (!PartiallyIntersects(rl_src1, rl_dest)) {
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
RegLocation rl_result = GenShiftImmOpLong(Instruction::SHL_LONG, rl_dest, rl_src1,
shift_amount);
@@ -1808,7 +1808,6 @@ void X86Mir2Lir::GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src,
x86op = GetOpcode(op, rl_dest, rl_src, true);
NewLIR2(x86op, rl_dest.reg.GetHighReg(), rl_src.reg.GetHighReg());
- FreeTemp(rl_src.reg); // ???
}
return;
}
@@ -1842,6 +1841,14 @@ void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instructi
GenLongRegOrMemOp(rl_result, rl_src, op);
StoreFinalValueWide(rl_dest, rl_result);
return;
+ } else if (!cu_->target64 && Intersects(rl_src, rl_dest)) {
+ // Handle the case when src and dest are intersect.
+ rl_src = LoadValueWide(rl_src, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ rl_src = UpdateLocWideTyped(rl_src, kCoreReg);
+ GenLongRegOrMemOp(rl_result, rl_src, op);
+ StoreFinalValueWide(rl_dest, rl_result);
+ return;
}
// It wasn't in registers, so it better be in memory.
@@ -1869,7 +1876,6 @@ void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instructi
AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
false /* is_load */, true /* is64bit */);
}
- FreeTemp(rl_src.reg);
}
void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src1,
@@ -2479,7 +2485,7 @@ void X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest
GenArithOpLong(Instruction::ADD_LONG, rl_dest, rl_src, rl_src);
return;
}
- if (BadOverlap(rl_src, rl_dest)) {
+ if (PartiallyIntersects(rl_src, rl_dest)) {
GenShiftOpLong(opcode, rl_dest, rl_src, rl_shift);
return;
}
@@ -2924,25 +2930,25 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
break;
case Instruction::ADD_INT_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::ADD_INT:
op = kOpAdd;
break;
case Instruction::SUB_INT_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::SUB_INT:
op = kOpSub;
break;
case Instruction::MUL_INT_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::MUL_INT:
op = kOpMul;
break;
case Instruction::DIV_INT_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::DIV_INT:
op = kOpDiv;
is_div_rem = true;
@@ -2950,46 +2956,46 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
/* NOTE: returns in kArg1 */
case Instruction::REM_INT_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::REM_INT:
op = kOpRem;
is_div_rem = true;
break;
case Instruction::AND_INT_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::AND_INT:
op = kOpAnd;
break;
case Instruction::OR_INT_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::OR_INT:
op = kOpOr;
break;
case Instruction::XOR_INT_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::XOR_INT:
op = kOpXor;
break;
case Instruction::SHL_INT_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::SHL_INT:
shift_op = true;
op = kOpLsl;
break;
case Instruction::SHR_INT_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::SHR_INT:
shift_op = true;
op = kOpAsr;
break;
case Instruction::USHR_INT_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::USHR_INT:
shift_op = true;
op = kOpLsr;
@@ -3239,19 +3245,19 @@ void X86Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
switch (opcode) {
case Instruction::SHL_LONG_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::SHL_LONG:
op = kOpLsl;
break;
case Instruction::SHR_LONG_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::SHR_LONG:
op = kOpAsr;
break;
case Instruction::USHR_LONG_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::USHR_LONG:
op = kOpLsr;
break;
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 6898b5078c..8d5dabc5fd 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -657,7 +657,8 @@ LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
CHECK_EQ(is_array, false);
CHECK_EQ(r_dest.IsFloat(), false);
break;
- } // else fall-through to k32 case
+ }
+ FALLTHROUGH_INTENDED; // else fall-through to k32 case
case k32:
case kSingle:
case kReference: // TODO: update for reference decompression on 64-bit targets.
@@ -791,7 +792,7 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
switch (size) {
case k64:
consider_non_temporal = true;
- // Fall through!
+ FALLTHROUGH_INTENDED;
case kDouble:
if (r_src.IsFloat()) {
opcode = is_array ? kX86MovsdAR : kX86MovsdMR;
@@ -810,7 +811,8 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
CHECK_EQ(r_src.IsFloat(), false);
consider_non_temporal = true;
break;
- } // else fall-through to k32 case
+ }
+ FALLTHROUGH_INTENDED; // else fall-through to k32 case
case k32:
case kSingle:
case kReference:
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 3cc573b9c9..4388041fac 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -124,7 +124,7 @@ bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb) {
for (uint32_t idx : bb->data_flow_info->def_v->Indexes()) {
/* Block bb defines register idx */
- def_block_matrix_[idx]->SetBit(bb->id);
+ temp_bit_matrix_[idx]->SetBit(bb->id);
}
return true;
}
@@ -132,15 +132,17 @@ bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb) {
void MIRGraph::ComputeDefBlockMatrix() {
int num_registers = GetNumOfCodeAndTempVRs();
/* Allocate num_registers bit vector pointers */
- def_block_matrix_ = static_cast<ArenaBitVector**>
- (arena_->Alloc(sizeof(ArenaBitVector *) * num_registers,
- kArenaAllocDFInfo));
+ DCHECK(temp_scoped_alloc_ != nullptr);
+ DCHECK(temp_bit_matrix_ == nullptr);
+ temp_bit_matrix_ = static_cast<ArenaBitVector**>(
+ temp_scoped_alloc_->Alloc(sizeof(ArenaBitVector*) * num_registers, kArenaAllocDFInfo));
int i;
/* Initialize num_register vectors with num_blocks bits each */
for (i = 0; i < num_registers; i++) {
- def_block_matrix_[i] =
- new (arena_) ArenaBitVector(arena_, GetNumBlocks(), false, kBitMapBMatrix);
+ temp_bit_matrix_[i] = new (temp_scoped_alloc_.get()) ArenaBitVector(arena_, GetNumBlocks(),
+ false, kBitMapBMatrix);
+ temp_bit_matrix_[i]->ClearAllBits();
}
AllNodesIterator iter(this);
@@ -159,7 +161,7 @@ void MIRGraph::ComputeDefBlockMatrix() {
int num_regs = GetNumOfCodeVRs();
int in_reg = GetFirstInVR();
for (; in_reg < num_regs; in_reg++) {
- def_block_matrix_[in_reg]->SetBit(GetEntryBlock()->id);
+ temp_bit_matrix_[in_reg]->SetBit(GetEntryBlock()->id);
}
}
@@ -478,7 +480,7 @@ void MIRGraph::InsertPhiNodes() {
/* Iterate through each Dalvik register */
for (dalvik_reg = GetNumOfCodeAndTempVRs() - 1; dalvik_reg >= 0; dalvik_reg--) {
- input_blocks->Copy(def_block_matrix_[dalvik_reg]);
+ input_blocks->Copy(temp_bit_matrix_[dalvik_reg]);
phi_blocks->ClearAllBits();
do {
// TUNING: When we repeat this, we could skip indexes from the previous pass.
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index cdb816d560..fb648fc532 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -627,7 +627,7 @@ static void ResolveExceptionsForMethod(MutableMethodHelper* mh,
if (code_item->tries_size_ == 0) {
return; // nothing to process
}
- const byte* encoded_catch_handler_list = DexFile::GetCatchHandlerData(*code_item, 0);
+ const uint8_t* encoded_catch_handler_list = DexFile::GetCatchHandlerData(*code_item, 0);
size_t num_encoded_catch_handlers = DecodeUnsignedLeb128(&encoded_catch_handler_list);
for (size_t i = 0; i < num_encoded_catch_handlers; i++) {
int32_t encoded_catch_handler_size = DecodeSignedLeb128(&encoded_catch_handler_list);
@@ -1505,7 +1505,7 @@ static void ResolveClassFieldsAndMethods(const ParallelCompilationManager* manag
// Note the class_data pointer advances through the headers,
// static fields, instance fields, direct methods, and virtual
// methods.
- const byte* class_data = dex_file.GetClassData(class_def);
+ const uint8_t* class_data = dex_file.GetClassData(class_def);
if (class_data == nullptr) {
// Empty class such as a marker interface.
requires_constructor_barrier = false;
@@ -1882,7 +1882,7 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz
if (manager->GetCompiler()->verification_results_->IsClassRejected(ref)) {
return;
}
- const byte* class_data = dex_file.GetClassData(class_def);
+ const uint8_t* class_data = dex_file.GetClassData(class_def);
if (class_data == nullptr) {
// empty class, probably a marker interface
return;
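
The first compiler_driver.cc hunk walks the encoded catch-handler list with DecodeUnsignedLeb128 and DecodeSignedLeb128 over a raw uint8_t stream. For reference, a hedged sketch of unsigned LEB128 decoding; ART's own helper is the one used above and may differ in bounds handling:

    #include <cstdint>

    // Decode one unsigned LEB128 value and advance *data past it.
    // Each byte contributes its low 7 bits; the high bit marks continuation.
    static uint32_t DecodeUnsignedLeb128Sketch(const uint8_t** data) {
      const uint8_t* ptr = *data;
      uint32_t result = 0;
      int shift = 0;
      uint8_t byte;
      do {
        byte = *ptr++;
        result |= static_cast<uint32_t>(byte & 0x7f) << shift;
        shift += 7;
      } while ((byte & 0x80) != 0);  // No malformed-input checks in this sketch.
      *data = ptr;
      return result;
    }
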
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index 74ee038b63..c32bdb45dc 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_ELF_BUILDER_H_
#include "base/stl_util.h"
+#include "base/value_object.h"
#include "buffered_output_stream.h"
#include "elf_utils.h"
#include "file_output_stream.h"
@@ -26,11 +27,12 @@
namespace art {
template <typename Elf_Word, typename Elf_Sword, typename Elf_Shdr>
-class ElfSectionBuilder {
+class ElfSectionBuilder : public ValueObject {
public:
ElfSectionBuilder(const std::string& sec_name, Elf_Word type, Elf_Word flags,
const ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> *link, Elf_Word info,
- Elf_Word align, Elf_Word entsize) : name_(sec_name), link_(link) {
+ Elf_Word align, Elf_Word entsize)
+ : section_index_(0), name_(sec_name), link_(link) {
memset(&section_, 0, sizeof(section_));
section_.sh_type = type;
section_.sh_flags = flags;
@@ -39,23 +41,41 @@ class ElfSectionBuilder {
section_.sh_entsize = entsize;
}
- virtual ~ElfSectionBuilder() {}
+ ~ElfSectionBuilder() {}
- Elf_Shdr section_;
- Elf_Word section_index_ = 0;
+ Elf_Word GetLink() const {
+ return (link_ != nullptr) ? link_->section_index_ : 0;
+ }
- Elf_Word GetLink() {
- return (link_) ? link_->section_index_ : 0;
+ const Elf_Shdr* GetSection() const {
+ return &section_;
}
- const std::string name_;
+ Elf_Shdr* GetSection() {
+ return &section_;
+ }
- protected:
- const ElfSectionBuilder* link_;
+ Elf_Word GetSectionIndex() const {
+ return section_index_;
+ }
+
+ void SetSectionIndex(Elf_Word section_index) {
+ section_index_ = section_index;
+ }
+
+ const std::string& GetName() const {
+ return name_;
+ }
+
+ private:
+ Elf_Shdr section_;
+ Elf_Word section_index_;
+ const std::string name_;
+ const ElfSectionBuilder* const link_;
};
template <typename Elf_Word, typename Elf_Sword, typename Elf_Dyn, typename Elf_Shdr>
-class ElfDynamicBuilder : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> {
+class ElfDynamicBuilder FINAL : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> {
public:
void AddDynamicTag(Elf_Sword tag, Elf_Word d_un) {
if (tag == DT_NULL) {
@@ -65,7 +85,7 @@ class ElfDynamicBuilder : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr
}
void AddDynamicTag(Elf_Sword tag, Elf_Word d_un,
- ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* section) {
+ const ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* section) {
if (tag == DT_NULL) {
return;
}
@@ -78,7 +98,7 @@ class ElfDynamicBuilder : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr
link, 0, kPageSize, sizeof(Elf_Dyn)) {}
~ElfDynamicBuilder() {}
- Elf_Word GetSize() {
+ Elf_Word GetSize() const {
// Add 1 for the DT_NULL, 1 for DT_STRSZ, and 1 for DT_SONAME. All of
// these must be added when we actually put the file together because
// their values are very dependent on state.
@@ -89,13 +109,13 @@ class ElfDynamicBuilder : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr
// table and soname_off should be the offset of the soname in .dynstr.
// Since neither can be found prior to final layout, we wait until here
// to add them.
- std::vector<Elf_Dyn> GetDynamics(Elf_Word strsz, Elf_Word soname) {
+ std::vector<Elf_Dyn> GetDynamics(Elf_Word strsz, Elf_Word soname) const {
std::vector<Elf_Dyn> ret;
for (auto it = dynamics_.cbegin(); it != dynamics_.cend(); ++it) {
- if (it->section_) {
+ if (it->section_ != nullptr) {
// We are adding an address relative to a section.
ret.push_back(
- {it->tag_, {it->off_ + it->section_->section_.sh_addr}});
+ {it->tag_, {it->off_ + it->section_->GetSection()->sh_addr}});
} else {
ret.push_back({it->tag_, {it->off_}});
}
@@ -106,9 +126,9 @@ class ElfDynamicBuilder : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr
return ret;
}
- protected:
+ private:
struct ElfDynamicState {
- ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* section_;
+ const ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* section_;
Elf_Sword tag_;
Elf_Word off_;
};
@@ -116,39 +136,50 @@ class ElfDynamicBuilder : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr
};
template <typename Elf_Word, typename Elf_Sword, typename Elf_Shdr>
-class ElfRawSectionBuilder : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> {
+class ElfRawSectionBuilder FINAL : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> {
public:
ElfRawSectionBuilder(const std::string& sec_name, Elf_Word type, Elf_Word flags,
const ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* link, Elf_Word info,
Elf_Word align, Elf_Word entsize)
: ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>(sec_name, type, flags, link, info, align,
- entsize) {}
+ entsize) {
+ }
+
~ElfRawSectionBuilder() {}
- std::vector<uint8_t>* GetBuffer() { return &buf_; }
- void SetBuffer(std::vector<uint8_t>&& buf) { buf_ = buf; }
- protected:
+ std::vector<uint8_t>* GetBuffer() {
+ return &buf_;
+ }
+
+ void SetBuffer(const std::vector<uint8_t>& buf) {
+ buf_ = buf;
+ }
+
+ private:
std::vector<uint8_t> buf_;
};
template <typename Elf_Word, typename Elf_Sword, typename Elf_Shdr>
-class ElfOatSectionBuilder : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> {
+class ElfOatSectionBuilder FINAL : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> {
public:
ElfOatSectionBuilder(const std::string& sec_name, Elf_Word size, Elf_Word offset,
Elf_Word type, Elf_Word flags)
: ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>(sec_name, type, flags, nullptr, 0, kPageSize,
- 0), offset_(offset), size_(size) {}
+ 0),
+ offset_(offset), size_(size) {
+ }
+
~ElfOatSectionBuilder() {}
- Elf_Word GetOffset() {
+ Elf_Word GetOffset() const {
return offset_;
}
- Elf_Word GetSize() {
+ Elf_Word GetSize() const {
return size_;
}
- protected:
+ private:
// Offset of the content within the file.
Elf_Word offset_;
// Size of the content within the file.
@@ -175,7 +206,7 @@ static inline unsigned elfhash(const char *_name) {
template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr, typename Elf_Sym,
typename Elf_Shdr>
-class ElfSymtabBuilder : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> {
+class ElfSymtabBuilder FINAL : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> {
public:
// Add a symbol with given name to this symtab. The symbol refers to
// 'relative_addr' within the given section and has the given attributes.
@@ -202,10 +233,12 @@ class ElfSymtabBuilder : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>
strtab_(str_name,
str_type,
((alloc) ? SHF_ALLOC : 0U),
- nullptr, 0, 1, 1) {}
+ nullptr, 0, 1, 1) {
+ }
+
~ElfSymtabBuilder() {}
- std::vector<Elf_Word> GenerateHashContents() {
+ std::vector<Elf_Word> GenerateHashContents() const {
// Here is how The ELF hash table works.
// There are 3 arrays to worry about.
// * The symbol table where the symbol information is.
@@ -295,7 +328,7 @@ class ElfSymtabBuilder : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>
tab += it->name_;
tab += '\0';
}
- strtab_.section_.sh_size = tab.size();
+ strtab_.GetSection()->sh_size = tab.size();
return tab;
}
@@ -311,13 +344,13 @@ class ElfSymtabBuilder : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>
memset(&sym, 0, sizeof(sym));
sym.st_name = it->name_idx_;
if (it->is_relative_) {
- sym.st_value = it->addr_ + it->section_->section_.sh_offset;
+ sym.st_value = it->addr_ + it->section_->GetSection()->sh_offset;
} else {
sym.st_value = it->addr_;
}
sym.st_size = it->size_;
sym.st_other = it->other_;
- sym.st_shndx = it->section_->section_index_;
+ sym.st_shndx = it->section_->GetSectionIndex();
sym.st_info = it->info_;
ret.push_back(sym);
@@ -325,7 +358,7 @@ class ElfSymtabBuilder : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>
return ret;
}
- Elf_Word GetSize() {
+ Elf_Word GetSize() const {
// 1 is for the implicit NULL symbol.
return symbols_.size() + 1;
}
@@ -334,7 +367,7 @@ class ElfSymtabBuilder : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>
return &strtab_;
}
- protected:
+ private:
struct ElfSymbolState {
const std::string name_;
const ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* section_;
@@ -377,18 +410,26 @@ class ElfFilePiece {
protected:
explicit ElfFilePiece(Elf_Word offset) : offset_(offset) {}
- virtual std::string GetDescription() = 0;
+ Elf_Word GetOffset() const {
+ return offset_;
+ }
+
+ virtual const char* GetDescription() const = 0;
virtual bool DoActualWrite(File* elf_file) = 0;
- Elf_Word offset_;
+ private:
+ const Elf_Word offset_;
+
+ DISALLOW_COPY_AND_ASSIGN(ElfFilePiece);
};
template <typename Elf_Word>
-class ElfFileMemoryPiece : public ElfFilePiece<Elf_Word> {
+class ElfFileMemoryPiece FINAL : public ElfFilePiece<Elf_Word> {
public:
ElfFileMemoryPiece(const std::string& name, Elf_Word offset, const void* data, Elf_Word size)
: ElfFilePiece<Elf_Word>(offset), dbg_name_(name), data_(data), size_(size) {}
+ protected:
bool DoActualWrite(File* elf_file) OVERRIDE {
DCHECK(data_ != nullptr || size_ == 0U) << dbg_name_ << " " << size_;
@@ -400,8 +441,8 @@ class ElfFileMemoryPiece : public ElfFilePiece<Elf_Word> {
return true;
}
- std::string GetDescription() OVERRIDE {
- return dbg_name_;
+ const char* GetDescription() const OVERRIDE {
+ return dbg_name_.c_str();
}
private:
@@ -418,13 +459,14 @@ class CodeOutput {
};
template <typename Elf_Word>
-class ElfFileRodataPiece : public ElfFilePiece<Elf_Word> {
+class ElfFileRodataPiece FINAL : public ElfFilePiece<Elf_Word> {
public:
ElfFileRodataPiece(Elf_Word offset, CodeOutput* output) : ElfFilePiece<Elf_Word>(offset),
output_(output) {}
+ protected:
bool DoActualWrite(File* elf_file) OVERRIDE {
- output_->SetCodeOffset(this->offset_);
+ output_->SetCodeOffset(this->GetOffset());
std::unique_ptr<BufferedOutputStream> output_stream(
new BufferedOutputStream(new FileOutputStream(elf_file)));
if (!output_->Write(output_stream.get())) {
@@ -435,20 +477,23 @@ class ElfFileRodataPiece : public ElfFilePiece<Elf_Word> {
return true;
}
- std::string GetDescription() OVERRIDE {
+ const char* GetDescription() const OVERRIDE {
return ".rodata";
}
private:
- CodeOutput* output_;
+ CodeOutput* const output_;
+
+ DISALLOW_COPY_AND_ASSIGN(ElfFileRodataPiece);
};
template <typename Elf_Word>
-class ElfFileOatTextPiece : public ElfFilePiece<Elf_Word> {
+class ElfFileOatTextPiece FINAL : public ElfFilePiece<Elf_Word> {
public:
ElfFileOatTextPiece(Elf_Word offset, CodeOutput* output) : ElfFilePiece<Elf_Word>(offset),
output_(output) {}
+ protected:
bool DoActualWrite(File* elf_file) OVERRIDE {
// All data is written by the ElfFileRodataPiece right now, as the oat writer writes in one
// piece. This is for future flexibility.
@@ -456,12 +501,14 @@ class ElfFileOatTextPiece : public ElfFilePiece<Elf_Word> {
return true;
}
- std::string GetDescription() OVERRIDE {
+ const char* GetDescription() const OVERRIDE {
return ".text";
}
private:
- CodeOutput* output_;
+ CodeOutput* const output_;
+
+ DISALLOW_COPY_AND_ASSIGN(ElfFileOatTextPiece);
};
template <typename Elf_Word>
@@ -513,6 +560,14 @@ class ElfBuilder FINAL {
}
~ElfBuilder() {}
+ const ElfOatSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>& GetTextBuilder() const {
+ return text_builder_;
+ }
+
+ ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr>* GetSymtabBuilder() {
+ return &symtab_builder_;
+ }
+
bool Init() {
// The basic layout of the elf file. Order may be different in final output.
// +-------------------------+
@@ -676,34 +731,40 @@ class ElfBuilder FINAL {
section_index_ = 1;
// setup .dynsym
- section_ptrs_.push_back(&dynsym_builder_.section_);
+ section_ptrs_.push_back(dynsym_builder_.GetSection());
AssignSectionStr(&dynsym_builder_, &shstrtab_);
- dynsym_builder_.section_index_ = section_index_++;
+ dynsym_builder_.SetSectionIndex(section_index_);
+ section_index_++;
// Setup .dynstr
- section_ptrs_.push_back(&dynsym_builder_.GetStrTab()->section_);
+ section_ptrs_.push_back(dynsym_builder_.GetStrTab()->GetSection());
AssignSectionStr(dynsym_builder_.GetStrTab(), &shstrtab_);
- dynsym_builder_.GetStrTab()->section_index_ = section_index_++;
+ dynsym_builder_.GetStrTab()->SetSectionIndex(section_index_);
+ section_index_++;
// Setup .hash
- section_ptrs_.push_back(&hash_builder_.section_);
+ section_ptrs_.push_back(hash_builder_.GetSection());
AssignSectionStr(&hash_builder_, &shstrtab_);
- hash_builder_.section_index_ = section_index_++;
+ hash_builder_.SetSectionIndex(section_index_);
+ section_index_++;
// Setup .rodata
- section_ptrs_.push_back(&rodata_builder_.section_);
+ section_ptrs_.push_back(rodata_builder_.GetSection());
AssignSectionStr(&rodata_builder_, &shstrtab_);
- rodata_builder_.section_index_ = section_index_++;
+ rodata_builder_.SetSectionIndex(section_index_);
+ section_index_++;
// Setup .text
- section_ptrs_.push_back(&text_builder_.section_);
+ section_ptrs_.push_back(text_builder_.GetSection());
AssignSectionStr(&text_builder_, &shstrtab_);
- text_builder_.section_index_ = section_index_++;
+ text_builder_.SetSectionIndex(section_index_);
+ section_index_++;
// Setup .dynamic
- section_ptrs_.push_back(&dynamic_builder_.section_);
+ section_ptrs_.push_back(dynamic_builder_.GetSection());
AssignSectionStr(&dynamic_builder_, &shstrtab_);
- dynamic_builder_.section_index_ = section_index_++;
+ dynamic_builder_.SetSectionIndex(section_index_);
+ section_index_++;
// Fill in the hash section.
hash_ = dynsym_builder_.GenerateHashContents();
@@ -718,64 +779,67 @@ class ElfBuilder FINAL {
// Get the layout in the sections.
//
// Get the layout of the dynsym section.
- dynsym_builder_.section_.sh_offset = RoundUp(base_offset, dynsym_builder_.section_.sh_addralign);
- dynsym_builder_.section_.sh_addr = dynsym_builder_.section_.sh_offset;
- dynsym_builder_.section_.sh_size = dynsym_builder_.GetSize() * sizeof(Elf_Sym);
- dynsym_builder_.section_.sh_link = dynsym_builder_.GetLink();
+ dynsym_builder_.GetSection()->sh_offset =
+ RoundUp(base_offset, dynsym_builder_.GetSection()->sh_addralign);
+ dynsym_builder_.GetSection()->sh_addr = dynsym_builder_.GetSection()->sh_offset;
+ dynsym_builder_.GetSection()->sh_size = dynsym_builder_.GetSize() * sizeof(Elf_Sym);
+ dynsym_builder_.GetSection()->sh_link = dynsym_builder_.GetLink();
// Get the layout of the dynstr section.
- dynsym_builder_.GetStrTab()->section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>
- (dynsym_builder_.GetStrTab()->section_,
- dynsym_builder_.section_);
- dynsym_builder_.GetStrTab()->section_.sh_addr = dynsym_builder_.GetStrTab()->section_.sh_offset;
- dynsym_builder_.GetStrTab()->section_.sh_size = dynstr_.size();
- dynsym_builder_.GetStrTab()->section_.sh_link = dynsym_builder_.GetStrTab()->GetLink();
+ dynsym_builder_.GetStrTab()->GetSection()->sh_offset =
+ NextOffset<Elf_Word, Elf_Shdr>(*dynsym_builder_.GetStrTab()->GetSection(),
+ *dynsym_builder_.GetSection());
+ dynsym_builder_.GetStrTab()->GetSection()->sh_addr =
+ dynsym_builder_.GetStrTab()->GetSection()->sh_offset;
+ dynsym_builder_.GetStrTab()->GetSection()->sh_size = dynstr_.size();
+ dynsym_builder_.GetStrTab()->GetSection()->sh_link = dynsym_builder_.GetStrTab()->GetLink();
// Get the layout of the hash section
- hash_builder_.section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>
- (hash_builder_.section_,
- dynsym_builder_.GetStrTab()->section_);
- hash_builder_.section_.sh_addr = hash_builder_.section_.sh_offset;
- hash_builder_.section_.sh_size = hash_.size() * sizeof(Elf_Word);
- hash_builder_.section_.sh_link = hash_builder_.GetLink();
+ hash_builder_.GetSection()->sh_offset =
+ NextOffset<Elf_Word, Elf_Shdr>(*hash_builder_.GetSection(),
+ *dynsym_builder_.GetStrTab()->GetSection());
+ hash_builder_.GetSection()->sh_addr = hash_builder_.GetSection()->sh_offset;
+ hash_builder_.GetSection()->sh_size = hash_.size() * sizeof(Elf_Word);
+ hash_builder_.GetSection()->sh_link = hash_builder_.GetLink();
// Get the layout of the rodata section.
- rodata_builder_.section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>
- (rodata_builder_.section_,
- hash_builder_.section_);
- rodata_builder_.section_.sh_addr = rodata_builder_.section_.sh_offset;
- rodata_builder_.section_.sh_size = rodata_builder_.GetSize();
- rodata_builder_.section_.sh_link = rodata_builder_.GetLink();
+ rodata_builder_.GetSection()->sh_offset =
+ NextOffset<Elf_Word, Elf_Shdr>(*rodata_builder_.GetSection(),
+ *hash_builder_.GetSection());
+ rodata_builder_.GetSection()->sh_addr = rodata_builder_.GetSection()->sh_offset;
+ rodata_builder_.GetSection()->sh_size = rodata_builder_.GetSize();
+ rodata_builder_.GetSection()->sh_link = rodata_builder_.GetLink();
// Get the layout of the text section.
- text_builder_.section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>
- (text_builder_.section_, rodata_builder_.section_);
- text_builder_.section_.sh_addr = text_builder_.section_.sh_offset;
- text_builder_.section_.sh_size = text_builder_.GetSize();
- text_builder_.section_.sh_link = text_builder_.GetLink();
- CHECK_ALIGNED(rodata_builder_.section_.sh_offset + rodata_builder_.section_.sh_size, kPageSize);
+ text_builder_.GetSection()->sh_offset =
+ NextOffset<Elf_Word, Elf_Shdr>(*text_builder_.GetSection(),
+ *rodata_builder_.GetSection());
+ text_builder_.GetSection()->sh_addr = text_builder_.GetSection()->sh_offset;
+ text_builder_.GetSection()->sh_size = text_builder_.GetSize();
+ text_builder_.GetSection()->sh_link = text_builder_.GetLink();
+ CHECK_ALIGNED(rodata_builder_.GetSection()->sh_offset +
+ rodata_builder_.GetSection()->sh_size, kPageSize);
// Get the layout of the dynamic section.
- dynamic_builder_.section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>
- (dynamic_builder_.section_,
- text_builder_.section_);
- dynamic_builder_.section_.sh_addr = dynamic_builder_.section_.sh_offset;
- dynamic_builder_.section_.sh_size = dynamic_builder_.GetSize() * sizeof(Elf_Dyn);
- dynamic_builder_.section_.sh_link = dynamic_builder_.GetLink();
+ dynamic_builder_.GetSection()->sh_offset =
+ NextOffset<Elf_Word, Elf_Shdr>(*dynamic_builder_.GetSection(), *text_builder_.GetSection());
+ dynamic_builder_.GetSection()->sh_addr = dynamic_builder_.GetSection()->sh_offset;
+ dynamic_builder_.GetSection()->sh_size = dynamic_builder_.GetSize() * sizeof(Elf_Dyn);
+ dynamic_builder_.GetSection()->sh_link = dynamic_builder_.GetLink();
if (debug_logging_) {
- LOG(INFO) << "dynsym off=" << dynsym_builder_.section_.sh_offset
- << " dynsym size=" << dynsym_builder_.section_.sh_size;
- LOG(INFO) << "dynstr off=" << dynsym_builder_.GetStrTab()->section_.sh_offset
- << " dynstr size=" << dynsym_builder_.GetStrTab()->section_.sh_size;
- LOG(INFO) << "hash off=" << hash_builder_.section_.sh_offset
- << " hash size=" << hash_builder_.section_.sh_size;
- LOG(INFO) << "rodata off=" << rodata_builder_.section_.sh_offset
- << " rodata size=" << rodata_builder_.section_.sh_size;
- LOG(INFO) << "text off=" << text_builder_.section_.sh_offset
- << " text size=" << text_builder_.section_.sh_size;
- LOG(INFO) << "dynamic off=" << dynamic_builder_.section_.sh_offset
- << " dynamic size=" << dynamic_builder_.section_.sh_size;
+ LOG(INFO) << "dynsym off=" << dynsym_builder_.GetSection()->sh_offset
+ << " dynsym size=" << dynsym_builder_.GetSection()->sh_size;
+ LOG(INFO) << "dynstr off=" << dynsym_builder_.GetStrTab()->GetSection()->sh_offset
+ << " dynstr size=" << dynsym_builder_.GetStrTab()->GetSection()->sh_size;
+ LOG(INFO) << "hash off=" << hash_builder_.GetSection()->sh_offset
+ << " hash size=" << hash_builder_.GetSection()->sh_size;
+ LOG(INFO) << "rodata off=" << rodata_builder_.GetSection()->sh_offset
+ << " rodata size=" << rodata_builder_.GetSection()->sh_size;
+ LOG(INFO) << "text off=" << text_builder_.GetSection()->sh_offset
+ << " text size=" << text_builder_.GetSection()->sh_size;
+ LOG(INFO) << "dynamic off=" << dynamic_builder_.GetSection()->sh_offset
+ << " dynamic size=" << dynamic_builder_.GetSection()->sh_size;
}
return true;
@@ -783,19 +847,21 @@ class ElfBuilder FINAL {
bool Write() {
std::vector<ElfFilePiece<Elf_Word>*> pieces;
- Elf_Shdr prev = dynamic_builder_.section_;
+ Elf_Shdr* prev = dynamic_builder_.GetSection();
std::string strtab;
if (IncludingDebugSymbols()) {
// Setup .symtab
- section_ptrs_.push_back(&symtab_builder_.section_);
+ section_ptrs_.push_back(symtab_builder_.GetSection());
AssignSectionStr(&symtab_builder_, &shstrtab_);
- symtab_builder_.section_index_ = section_index_++;
+ symtab_builder_.SetSectionIndex(section_index_);
+ section_index_++;
// Setup .strtab
- section_ptrs_.push_back(&symtab_builder_.GetStrTab()->section_);
+ section_ptrs_.push_back(symtab_builder_.GetStrTab()->GetSection());
AssignSectionStr(symtab_builder_.GetStrTab(), &shstrtab_);
- symtab_builder_.GetStrTab()->section_index_ = section_index_++;
+ symtab_builder_.GetStrTab()->SetSectionIndex(section_index_);
+ section_index_++;
strtab = symtab_builder_.GenerateStrtab();
if (debug_logging_) {
@@ -810,15 +876,17 @@ class ElfBuilder FINAL {
for (ElfRawSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> *builder = other_builders_.data(),
*end = builder + other_builders_.size();
builder != end; ++builder) {
- section_ptrs_.push_back(&builder->section_);
+ section_ptrs_.push_back(builder->GetSection());
AssignSectionStr(builder, &shstrtab_);
- builder->section_index_ = section_index_++;
+ builder->SetSectionIndex(section_index_);
+ section_index_++;
}
// Setup shstrtab
- section_ptrs_.push_back(&shstrtab_builder_.section_);
+ section_ptrs_.push_back(shstrtab_builder_.GetSection());
AssignSectionStr(&shstrtab_builder_, &shstrtab_);
- shstrtab_builder_.section_index_ = section_index_++;
+ shstrtab_builder_.SetSectionIndex(section_index_);
+ section_index_++;
if (debug_logging_) {
LOG(INFO) << ".shstrtab size (bytes) =" << shstrtab_.size()
@@ -829,71 +897,71 @@ class ElfBuilder FINAL {
if (IncludingDebugSymbols()) {
// Get the layout of the symtab section.
- symtab_builder_.section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>
- (symtab_builder_.section_,
- dynamic_builder_.section_);
- symtab_builder_.section_.sh_addr = 0;
+ symtab_builder_.GetSection()->sh_offset =
+ NextOffset<Elf_Word, Elf_Shdr>(*symtab_builder_.GetSection(),
+ *dynamic_builder_.GetSection());
+ symtab_builder_.GetSection()->sh_addr = 0;
// GetSize() already includes one extra entry to leave space for the null symbol.
- symtab_builder_.section_.sh_size = symtab_builder_.GetSize() * sizeof(Elf_Sym);
- symtab_builder_.section_.sh_link = symtab_builder_.GetLink();
+ symtab_builder_.GetSection()->sh_size = symtab_builder_.GetSize() * sizeof(Elf_Sym);
+ symtab_builder_.GetSection()->sh_link = symtab_builder_.GetLink();
// Get the layout of the dynstr section.
- symtab_builder_.GetStrTab()->section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>
- (symtab_builder_.GetStrTab()->section_,
- symtab_builder_.section_);
- symtab_builder_.GetStrTab()->section_.sh_addr = 0;
- symtab_builder_.GetStrTab()->section_.sh_size = strtab.size();
- symtab_builder_.GetStrTab()->section_.sh_link = symtab_builder_.GetStrTab()->GetLink();
-
- prev = symtab_builder_.GetStrTab()->section_;
+ symtab_builder_.GetStrTab()->GetSection()->sh_offset =
+ NextOffset<Elf_Word, Elf_Shdr>(*symtab_builder_.GetStrTab()->GetSection(),
+ *symtab_builder_.GetSection());
+ symtab_builder_.GetStrTab()->GetSection()->sh_addr = 0;
+ symtab_builder_.GetStrTab()->GetSection()->sh_size = strtab.size();
+ symtab_builder_.GetStrTab()->GetSection()->sh_link = symtab_builder_.GetStrTab()->GetLink();
+
+ prev = symtab_builder_.GetStrTab()->GetSection();
if (debug_logging_) {
- LOG(INFO) << "symtab off=" << symtab_builder_.section_.sh_offset
- << " symtab size=" << symtab_builder_.section_.sh_size;
- LOG(INFO) << "strtab off=" << symtab_builder_.GetStrTab()->section_.sh_offset
- << " strtab size=" << symtab_builder_.GetStrTab()->section_.sh_size;
+ LOG(INFO) << "symtab off=" << symtab_builder_.GetSection()->sh_offset
+ << " symtab size=" << symtab_builder_.GetSection()->sh_size;
+ LOG(INFO) << "strtab off=" << symtab_builder_.GetStrTab()->GetSection()->sh_offset
+ << " strtab size=" << symtab_builder_.GetStrTab()->GetSection()->sh_size;
}
}
// Get the layout of the extra sections. (This will deal with the debug
// sections if they are there)
for (auto it = other_builders_.begin(); it != other_builders_.end(); ++it) {
- it->section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>(it->section_, prev);
- it->section_.sh_addr = 0;
- it->section_.sh_size = it->GetBuffer()->size();
- it->section_.sh_link = it->GetLink();
+ it->GetSection()->sh_offset = NextOffset<Elf_Word, Elf_Shdr>(*it->GetSection(), *prev);
+ it->GetSection()->sh_addr = 0;
+ it->GetSection()->sh_size = it->GetBuffer()->size();
+ it->GetSection()->sh_link = it->GetLink();
// We postpone adding an ElfFilePiece to keep the order in "pieces."
- prev = it->section_;
+ prev = it->GetSection();
if (debug_logging_) {
- LOG(INFO) << it->name_ << " off=" << it->section_.sh_offset
- << " size=" << it->section_.sh_size;
+ LOG(INFO) << it->GetName() << " off=" << it->GetSection()->sh_offset
+ << " size=" << it->GetSection()->sh_size;
}
}
// Get the layout of the shstrtab section
- shstrtab_builder_.section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>
- (shstrtab_builder_.section_, prev);
- shstrtab_builder_.section_.sh_addr = 0;
- shstrtab_builder_.section_.sh_size = shstrtab_.size();
- shstrtab_builder_.section_.sh_link = shstrtab_builder_.GetLink();
+ shstrtab_builder_.GetSection()->sh_offset =
+ NextOffset<Elf_Word, Elf_Shdr>(*shstrtab_builder_.GetSection(), *prev);
+ shstrtab_builder_.GetSection()->sh_addr = 0;
+ shstrtab_builder_.GetSection()->sh_size = shstrtab_.size();
+ shstrtab_builder_.GetSection()->sh_link = shstrtab_builder_.GetLink();
if (debug_logging_) {
- LOG(INFO) << "shstrtab off=" << shstrtab_builder_.section_.sh_offset
- << " shstrtab size=" << shstrtab_builder_.section_.sh_size;
+ LOG(INFO) << "shstrtab off=" << shstrtab_builder_.GetSection()->sh_offset
+ << " shstrtab size=" << shstrtab_builder_.GetSection()->sh_size;
}
// The section header list comes after.
Elf_Word sections_offset = RoundUp(
- shstrtab_builder_.section_.sh_offset + shstrtab_builder_.section_.sh_size,
+ shstrtab_builder_.GetSection()->sh_offset + shstrtab_builder_.GetSection()->sh_size,
sizeof(Elf_Word));
// Setup the actual symbol arrays.
std::vector<Elf_Sym> dynsym = dynsym_builder_.GenerateSymtab();
- CHECK_EQ(dynsym.size() * sizeof(Elf_Sym), dynsym_builder_.section_.sh_size);
+ CHECK_EQ(dynsym.size() * sizeof(Elf_Sym), dynsym_builder_.GetSection()->sh_size);
std::vector<Elf_Sym> symtab;
if (IncludingDebugSymbols()) {
symtab = symtab_builder_.GenerateSymtab();
- CHECK_EQ(symtab.size() * sizeof(Elf_Sym), symtab_builder_.section_.sh_size);
+ CHECK_EQ(symtab.size() * sizeof(Elf_Sym), symtab_builder_.GetSection()->sh_size);
}
// Setup the dynamic section.
@@ -901,43 +969,44 @@ class ElfBuilder FINAL {
// and the soname_offset.
std::vector<Elf_Dyn> dynamic = dynamic_builder_.GetDynamics(dynstr_.size(),
dynstr_soname_offset_);
- CHECK_EQ(dynamic.size() * sizeof(Elf_Dyn), dynamic_builder_.section_.sh_size);
+ CHECK_EQ(dynamic.size() * sizeof(Elf_Dyn), dynamic_builder_.GetSection()->sh_size);
// Finish setup of the program headers now that we know the layout of the
// whole file.
- Elf_Word load_r_size = rodata_builder_.section_.sh_offset + rodata_builder_.section_.sh_size;
+ Elf_Word load_r_size =
+ rodata_builder_.GetSection()->sh_offset + rodata_builder_.GetSection()->sh_size;
program_headers_[PH_LOAD_R__].p_filesz = load_r_size;
program_headers_[PH_LOAD_R__].p_memsz = load_r_size;
- program_headers_[PH_LOAD_R__].p_align = rodata_builder_.section_.sh_addralign;
+ program_headers_[PH_LOAD_R__].p_align = rodata_builder_.GetSection()->sh_addralign;
- Elf_Word load_rx_size = text_builder_.section_.sh_size;
- program_headers_[PH_LOAD_R_X].p_offset = text_builder_.section_.sh_offset;
- program_headers_[PH_LOAD_R_X].p_vaddr = text_builder_.section_.sh_offset;
- program_headers_[PH_LOAD_R_X].p_paddr = text_builder_.section_.sh_offset;
+ Elf_Word load_rx_size = text_builder_.GetSection()->sh_size;
+ program_headers_[PH_LOAD_R_X].p_offset = text_builder_.GetSection()->sh_offset;
+ program_headers_[PH_LOAD_R_X].p_vaddr = text_builder_.GetSection()->sh_offset;
+ program_headers_[PH_LOAD_R_X].p_paddr = text_builder_.GetSection()->sh_offset;
program_headers_[PH_LOAD_R_X].p_filesz = load_rx_size;
program_headers_[PH_LOAD_R_X].p_memsz = load_rx_size;
- program_headers_[PH_LOAD_R_X].p_align = text_builder_.section_.sh_addralign;
-
- program_headers_[PH_LOAD_RW_].p_offset = dynamic_builder_.section_.sh_offset;
- program_headers_[PH_LOAD_RW_].p_vaddr = dynamic_builder_.section_.sh_offset;
- program_headers_[PH_LOAD_RW_].p_paddr = dynamic_builder_.section_.sh_offset;
- program_headers_[PH_LOAD_RW_].p_filesz = dynamic_builder_.section_.sh_size;
- program_headers_[PH_LOAD_RW_].p_memsz = dynamic_builder_.section_.sh_size;
- program_headers_[PH_LOAD_RW_].p_align = dynamic_builder_.section_.sh_addralign;
-
- program_headers_[PH_DYNAMIC].p_offset = dynamic_builder_.section_.sh_offset;
- program_headers_[PH_DYNAMIC].p_vaddr = dynamic_builder_.section_.sh_offset;
- program_headers_[PH_DYNAMIC].p_paddr = dynamic_builder_.section_.sh_offset;
- program_headers_[PH_DYNAMIC].p_filesz = dynamic_builder_.section_.sh_size;
- program_headers_[PH_DYNAMIC].p_memsz = dynamic_builder_.section_.sh_size;
- program_headers_[PH_DYNAMIC].p_align = dynamic_builder_.section_.sh_addralign;
+ program_headers_[PH_LOAD_R_X].p_align = text_builder_.GetSection()->sh_addralign;
+
+ program_headers_[PH_LOAD_RW_].p_offset = dynamic_builder_.GetSection()->sh_offset;
+ program_headers_[PH_LOAD_RW_].p_vaddr = dynamic_builder_.GetSection()->sh_offset;
+ program_headers_[PH_LOAD_RW_].p_paddr = dynamic_builder_.GetSection()->sh_offset;
+ program_headers_[PH_LOAD_RW_].p_filesz = dynamic_builder_.GetSection()->sh_size;
+ program_headers_[PH_LOAD_RW_].p_memsz = dynamic_builder_.GetSection()->sh_size;
+ program_headers_[PH_LOAD_RW_].p_align = dynamic_builder_.GetSection()->sh_addralign;
+
+ program_headers_[PH_DYNAMIC].p_offset = dynamic_builder_.GetSection()->sh_offset;
+ program_headers_[PH_DYNAMIC].p_vaddr = dynamic_builder_.GetSection()->sh_offset;
+ program_headers_[PH_DYNAMIC].p_paddr = dynamic_builder_.GetSection()->sh_offset;
+ program_headers_[PH_DYNAMIC].p_filesz = dynamic_builder_.GetSection()->sh_size;
+ program_headers_[PH_DYNAMIC].p_memsz = dynamic_builder_.GetSection()->sh_size;
+ program_headers_[PH_DYNAMIC].p_align = dynamic_builder_.GetSection()->sh_addralign;
// Finish setup of the Ehdr values.
elf_header_.e_phoff = PHDR_OFFSET;
elf_header_.e_shoff = sections_offset;
elf_header_.e_phnum = PH_NUM;
elf_header_.e_shnum = section_ptrs_.size();
- elf_header_.e_shstrndx = shstrtab_builder_.section_index_;
+ elf_header_.e_shstrndx = shstrtab_builder_.GetSectionIndex();
// Add the rest of the pieces to the list.
pieces.push_back(new ElfFileMemoryPiece<Elf_Word>("Elf Header", 0, &elf_header_,
@@ -945,33 +1014,33 @@ class ElfBuilder FINAL {
pieces.push_back(new ElfFileMemoryPiece<Elf_Word>("Program headers", PHDR_OFFSET,
&program_headers_, sizeof(program_headers_)));
pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".dynamic",
- dynamic_builder_.section_.sh_offset,
+ dynamic_builder_.GetSection()->sh_offset,
dynamic.data(),
- dynamic_builder_.section_.sh_size));
- pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".dynsym", dynsym_builder_.section_.sh_offset,
+ dynamic_builder_.GetSection()->sh_size));
+ pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".dynsym", dynsym_builder_.GetSection()->sh_offset,
dynsym.data(),
dynsym.size() * sizeof(Elf_Sym)));
pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".dynstr",
- dynsym_builder_.GetStrTab()->section_.sh_offset,
+ dynsym_builder_.GetStrTab()->GetSection()->sh_offset,
dynstr_.c_str(), dynstr_.size()));
- pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".hash", hash_builder_.section_.sh_offset,
+ pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".hash", hash_builder_.GetSection()->sh_offset,
hash_.data(),
hash_.size() * sizeof(Elf_Word)));
- pieces.push_back(new ElfFileRodataPiece<Elf_Word>(rodata_builder_.section_.sh_offset,
+ pieces.push_back(new ElfFileRodataPiece<Elf_Word>(rodata_builder_.GetSection()->sh_offset,
oat_writer_));
- pieces.push_back(new ElfFileOatTextPiece<Elf_Word>(text_builder_.section_.sh_offset,
+ pieces.push_back(new ElfFileOatTextPiece<Elf_Word>(text_builder_.GetSection()->sh_offset,
oat_writer_));
if (IncludingDebugSymbols()) {
pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".symtab",
- symtab_builder_.section_.sh_offset,
+ symtab_builder_.GetSection()->sh_offset,
symtab.data(),
symtab.size() * sizeof(Elf_Sym)));
pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".strtab",
- symtab_builder_.GetStrTab()->section_.sh_offset,
+ symtab_builder_.GetStrTab()->GetSection()->sh_offset,
strtab.c_str(), strtab.size()));
}
pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".shstrtab",
- shstrtab_builder_.section_.sh_offset,
+ shstrtab_builder_.GetSection()->sh_offset,
&shstrtab_[0], shstrtab_.size()));
for (uint32_t i = 0; i < section_ptrs_.size(); ++i) {
// Just add all the sections individually since they are all over the
@@ -983,7 +1052,7 @@ class ElfBuilder FINAL {
// Postponed debug info.
for (auto it = other_builders_.begin(); it != other_builders_.end(); ++it) {
- pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(it->name_, it->section_.sh_offset,
+ pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(it->GetName(), it->GetSection()->sh_offset,
it->GetBuffer()->data(),
it->GetBuffer()->size()));
}
@@ -1006,47 +1075,6 @@ class ElfBuilder FINAL {
}
private:
- CodeOutput* oat_writer_;
- File* elf_file_;
- const bool add_symbols_;
- const bool debug_logging_;
-
- bool fatal_error_ = false;
-
- // What phdr is.
- static const uint32_t PHDR_OFFSET = sizeof(Elf_Ehdr);
- enum : uint8_t {
- PH_PHDR = 0,
- PH_LOAD_R__ = 1,
- PH_LOAD_R_X = 2,
- PH_LOAD_RW_ = 3,
- PH_DYNAMIC = 4,
- PH_NUM = 5,
- };
- static const uint32_t PHDR_SIZE = sizeof(Elf_Phdr) * PH_NUM;
- Elf_Phdr program_headers_[PH_NUM];
-
- Elf_Ehdr elf_header_;
-
- Elf_Shdr null_hdr_;
- std::string shstrtab_;
- uint32_t section_index_;
- std::string dynstr_;
- uint32_t dynstr_soname_offset_;
- std::vector<Elf_Shdr*> section_ptrs_;
- std::vector<Elf_Word> hash_;
-
- public:
- ElfOatSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> text_builder_;
- ElfOatSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> rodata_builder_;
- ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr> dynsym_builder_;
- ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr> symtab_builder_;
- ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> hash_builder_;
- ElfDynamicBuilder<Elf_Word, Elf_Sword, Elf_Dyn, Elf_Shdr> dynamic_builder_;
- ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> shstrtab_builder_;
- std::vector<ElfRawSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>> other_builders_;
-
- private:
void SetISA(InstructionSet isa) {
switch (isa) {
case kArm:
@@ -1141,14 +1169,14 @@ class ElfBuilder FINAL {
true, 4, STB_GLOBAL, STT_OBJECT);
}
- void AssignSectionStr(ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> *builder,
+ void AssignSectionStr(ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* builder,
std::string* strtab) {
- builder->section_.sh_name = strtab->size();
- *strtab += builder->name_;
+ builder->GetSection()->sh_name = strtab->size();
+ *strtab += builder->GetName();
*strtab += '\0';
if (debug_logging_) {
- LOG(INFO) << "adding section name \"" << builder->name_ << "\" "
- << "to shstrtab at offset " << builder->section_.sh_name;
+ LOG(INFO) << "adding section name \"" << builder->GetName() << "\" "
+ << "to shstrtab at offset " << builder->GetSection()->sh_name;
}
}
@@ -1163,7 +1191,51 @@ class ElfBuilder FINAL {
return true;
}
- bool IncludingDebugSymbols() { return add_symbols_ && symtab_builder_.GetSize() > 1; }
+ bool IncludingDebugSymbols() const {
+ return add_symbols_ && symtab_builder_.GetSize() > 1;
+ }
+
+ CodeOutput* const oat_writer_;
+ File* const elf_file_;
+ const bool add_symbols_;
+ const bool debug_logging_;
+
+ bool fatal_error_ = false;
+
+ // What phdr is.
+ static const uint32_t PHDR_OFFSET = sizeof(Elf_Ehdr);
+ enum : uint8_t {
+ PH_PHDR = 0,
+ PH_LOAD_R__ = 1,
+ PH_LOAD_R_X = 2,
+ PH_LOAD_RW_ = 3,
+ PH_DYNAMIC = 4,
+ PH_NUM = 5,
+ };
+ static const uint32_t PHDR_SIZE = sizeof(Elf_Phdr) * PH_NUM;
+ Elf_Phdr program_headers_[PH_NUM];
+
+ Elf_Ehdr elf_header_;
+
+ Elf_Shdr null_hdr_;
+ std::string shstrtab_;
+  // The index of the current section being built; the first section has index 1.
+ uint32_t section_index_;
+ std::string dynstr_;
+ uint32_t dynstr_soname_offset_;
+ std::vector<const Elf_Shdr*> section_ptrs_;
+ std::vector<Elf_Word> hash_;
+
+ ElfOatSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> text_builder_;
+ ElfOatSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> rodata_builder_;
+ ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr> dynsym_builder_;
+ ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr> symtab_builder_;
+ ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> hash_builder_;
+ ElfDynamicBuilder<Elf_Word, Elf_Sword, Elf_Dyn, Elf_Shdr> dynamic_builder_;
+ ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> shstrtab_builder_;
+ std::vector<ElfRawSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>> other_builders_;
+
+ DISALLOW_COPY_AND_ASSIGN(ElfBuilder);
};
} // namespace art
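
Throughout ElfBuilder, each section's file offset is derived from the end of the previous section, rounded up to the new section's alignment; that is what the NextOffset<Elf_Word, Elf_Shdr> calls above compute. A small sketch of that layout rule, assuming NextOffset behaves as its call sites suggest (its body is outside this diff):

    #include <cstdint>

    // Round `value` up to a power-of-two `alignment`.
    static uint64_t RoundUpTo(uint64_t value, uint64_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    // File offset for the next section, given where the previous one ends.
    static uint64_t NextSectionOffset(uint64_t prev_sh_offset, uint64_t prev_sh_size,
                                      uint64_t next_sh_addralign) {
      return RoundUpTo(prev_sh_offset + prev_sh_size, next_sh_addralign);
    }
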
diff --git a/compiler/elf_writer_mclinker.cc b/compiler/elf_writer_mclinker.cc
index f0176412e1..7705b9cf8a 100644
--- a/compiler/elf_writer_mclinker.cc
+++ b/compiler/elf_writer_mclinker.cc
@@ -82,7 +82,7 @@ bool ElfWriterMclinker::Write(OatWriter* oat_writer,
}
// Fill oat_contents.
- VectorOutputStream output_stream("oat contents", oat_contents);
+ VectorOutputStream output_stream("oat contents", &oat_contents);
oat_writer->SetOatDataOffset(oat_section->offset());
CHECK(oat_writer->Write(&output_stream));
CHECK_EQ(oat_writer->GetSize(), oat_contents.size());
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index e661324676..c75d8f877d 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -195,7 +195,7 @@ std::vector<uint8_t>* ConstructCIEFrame(InstructionSet isa) {
}
}
-class OatWriterWrapper : public CodeOutput {
+class OatWriterWrapper FINAL : public CodeOutput {
public:
explicit OatWriterWrapper(OatWriter* oat_writer) : oat_writer_(oat_writer) {}
@@ -206,7 +206,7 @@ class OatWriterWrapper : public CodeOutput {
return oat_writer_->Write(out);
}
private:
- OatWriter* oat_writer_;
+ OatWriter* const oat_writer_;
};
template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
@@ -676,14 +676,14 @@ static void WriteDebugSymbols(const CompilerDriver* compiler_driver,
std::unique_ptr<std::vector<uint8_t>> cfi_info(
ConstructCIEFrame(compiler_driver->GetInstructionSet()));
- Elf_Addr text_section_address = builder->text_builder_.section_.sh_addr;
+ Elf_Addr text_section_address = builder->GetTextBuilder().GetSection()->sh_addr;
// Iterate over the compiled methods.
const std::vector<OatWriter::DebugInfo>& method_info = oat_writer->GetCFIMethodInfo();
- ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr,
- Elf_Sym, Elf_Shdr>* symtab = &builder->symtab_builder_;
+ ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr>* symtab =
+ builder->GetSymtabBuilder();
for (auto it = method_info.begin(); it != method_info.end(); ++it) {
- symtab->AddSymbol(it->method_name_, &builder->text_builder_, it->low_pc_, true,
+ symtab->AddSymbol(it->method_name_, &builder->GetTextBuilder(), it->low_pc_, true,
it->high_pc_ - it->low_pc_, STB_GLOBAL, STT_FUNC);
// Include CFI for compiled method, if possible.
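
WriteDebugSymbols above adds one STT_FUNC symbol per compiled method, addressed relative to .text; GenerateSymtab() later rebases it by the section's sh_offset (the is_relative_ branch earlier in this diff). A sketch of the resulting entry using the standard ELF64 types from <elf.h>, not ART's templated Elf_Sym:

    #include <elf.h>
    #include <cstdint>

    // Build a global function symbol whose value is relative to .text.
    static Elf64_Sym MakeMethodSymbol(uint32_t name_offset_in_strtab,
                                      uint64_t text_sh_offset,
                                      uint64_t low_pc,
                                      uint64_t high_pc,
                                      uint16_t text_section_index) {
      Elf64_Sym sym = {};
      sym.st_name = name_offset_in_strtab;               // index into .strtab
      sym.st_value = text_sh_offset + low_pc;            // rebased method address
      sym.st_size = high_pc - low_pc;                    // method code size
      sym.st_info = ELF64_ST_INFO(STB_GLOBAL, STT_FUNC);
      sym.st_shndx = text_section_index;                 // section the symbol lives in
      return sym;
    }
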
diff --git a/compiler/file_output_stream.h b/compiler/file_output_stream.h
index 76b00fe129..9dfbd7fcef 100644
--- a/compiler/file_output_stream.h
+++ b/compiler/file_output_stream.h
@@ -23,7 +23,7 @@
namespace art {
-class FileOutputStream : public OutputStream {
+class FileOutputStream FINAL : public OutputStream {
public:
explicit FileOutputStream(File* file);
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 5834e8e940..cf4259f790 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -163,8 +163,8 @@ TEST_F(ImageTest, WriteRead) {
gc::space::ImageSpace* image_space = heap->GetImageSpace();
image_space->VerifyImageAllocations();
- byte* image_begin = image_space->Begin();
- byte* image_end = image_space->End();
+ uint8_t* image_begin = image_space->Begin();
+ uint8_t* image_end = image_space->End();
CHECK_EQ(requested_image_base, reinterpret_cast<uintptr_t>(image_begin));
for (size_t i = 0; i < dex->NumClassDefs(); ++i) {
const DexFile::ClassDef& class_def = dex->GetClassDef(i);
@@ -173,11 +173,11 @@ TEST_F(ImageTest, WriteRead) {
EXPECT_TRUE(klass != nullptr) << descriptor;
if (image_classes.find(descriptor) != image_classes.end()) {
// Image classes should be located inside the image.
- EXPECT_LT(image_begin, reinterpret_cast<byte*>(klass)) << descriptor;
- EXPECT_LT(reinterpret_cast<byte*>(klass), image_end) << descriptor;
+ EXPECT_LT(image_begin, reinterpret_cast<uint8_t*>(klass)) << descriptor;
+ EXPECT_LT(reinterpret_cast<uint8_t*>(klass), image_end) << descriptor;
} else {
- EXPECT_TRUE(reinterpret_cast<byte*>(klass) >= image_end ||
- reinterpret_cast<byte*>(klass) < image_begin) << descriptor;
+ EXPECT_TRUE(reinterpret_cast<uint8_t*>(klass) >= image_end ||
+ reinterpret_cast<uint8_t*>(klass) < image_begin) << descriptor;
}
EXPECT_TRUE(Monitor::IsValidLockWord(klass->GetLockWord(false)));
}
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 6fff5f4503..35a3d4b3b2 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -559,10 +559,10 @@ void ImageWriter::CalculateNewObjectOffsets() {
void ImageWriter::CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) {
CHECK_NE(0U, oat_loaded_size);
- const byte* oat_file_begin = GetOatFileBegin();
- const byte* oat_file_end = oat_file_begin + oat_loaded_size;
+ const uint8_t* oat_file_begin = GetOatFileBegin();
+ const uint8_t* oat_file_end = oat_file_begin + oat_loaded_size;
oat_data_begin_ = oat_file_begin + oat_data_offset;
- const byte* oat_data_end = oat_data_begin_ + oat_file_->Size();
+ const uint8_t* oat_data_end = oat_data_begin_ + oat_file_->Size();
// Return to write header at start of image with future location of image_roots. At this point,
// image_end_ is the size of the image (excluding bitmaps).
@@ -604,8 +604,8 @@ void ImageWriter::CopyAndFixupObjectsCallback(Object* obj, void* arg) {
ImageWriter* image_writer = reinterpret_cast<ImageWriter*>(arg);
// see GetLocalAddress for similar computation
size_t offset = image_writer->GetImageOffset(obj);
- byte* dst = image_writer->image_->Begin() + offset;
- const byte* src = reinterpret_cast<const byte*>(obj);
+ uint8_t* dst = image_writer->image_->Begin() + offset;
+ const uint8_t* src = reinterpret_cast<const uint8_t*>(obj);
size_t n = obj->SizeOf();
DCHECK_LT(offset + n, image_writer->image_->Size());
memcpy(dst, src, n);
@@ -688,7 +688,7 @@ void ImageWriter::FixupObject(Object* orig, Object* copy) {
}
}
-const byte* ImageWriter::GetQuickCode(mirror::ArtMethod* method, bool* quick_is_interpreted) {
+const uint8_t* ImageWriter::GetQuickCode(mirror::ArtMethod* method, bool* quick_is_interpreted) {
DCHECK(!method->IsResolutionMethod() && !method->IsImtConflictMethod() &&
!method->IsAbstract()) << PrettyMethod(method);
@@ -696,7 +696,7 @@ const byte* ImageWriter::GetQuickCode(mirror::ArtMethod* method, bool* quick_is_
// trampoline.
// Quick entrypoint:
- const byte* quick_code = GetOatAddress(method->GetQuickOatCodeOffset());
+ const uint8_t* quick_code = GetOatAddress(method->GetQuickOatCodeOffset());
*quick_is_interpreted = false;
if (quick_code != nullptr &&
(!method->IsStatic() || method->IsConstructor() || method->GetDeclaringClass()->IsInitialized())) {
@@ -718,7 +718,7 @@ const byte* ImageWriter::GetQuickCode(mirror::ArtMethod* method, bool* quick_is_
return quick_code;
}
-const byte* ImageWriter::GetQuickEntryPoint(mirror::ArtMethod* method) {
+const uint8_t* ImageWriter::GetQuickEntryPoint(mirror::ArtMethod* method) {
// Calculate the quick entry point following the same logic as FixupMethod() below.
// The resolution method has a special trampoline to call.
if (UNLIKELY(method == Runtime::Current()->GetResolutionMethod())) {
@@ -757,14 +757,14 @@ void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_to_interpreter_bridge_offset_));
copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_to_interpreter_bridge_offset_));
copy->SetEntryPointFromInterpreter<kVerifyNone>(reinterpret_cast<EntryPointFromInterpreter*>
- (const_cast<byte*>(GetOatAddress(interpreter_to_interpreter_bridge_offset_))));
+ (const_cast<uint8_t*>(GetOatAddress(interpreter_to_interpreter_bridge_offset_))));
} else {
bool quick_is_interpreted;
- const byte* quick_code = GetQuickCode(orig, &quick_is_interpreted);
+ const uint8_t* quick_code = GetQuickCode(orig, &quick_is_interpreted);
copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(quick_code);
// Portable entrypoint:
- const byte* portable_code = GetOatAddress(orig->GetPortableOatCodeOffset());
+ const uint8_t* portable_code = GetOatAddress(orig->GetPortableOatCodeOffset());
bool portable_is_interpreted = false;
if (portable_code != nullptr &&
(!orig->IsStatic() || orig->IsConstructor() || orig->GetDeclaringClass()->IsInitialized())) {
@@ -794,7 +794,7 @@ void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
} else {
// Normal (non-abstract non-native) methods have various tables to relocate.
uint32_t native_gc_map_offset = orig->GetOatNativeGcMapOffset();
- const byte* native_gc_map = GetOatAddress(native_gc_map_offset);
+ const uint8_t* native_gc_map = GetOatAddress(native_gc_map_offset);
copy->SetNativeGcMap<kVerifyNone>(reinterpret_cast<const uint8_t*>(native_gc_map));
}
@@ -805,7 +805,7 @@ void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
: interpreter_to_compiled_code_bridge_offset_;
copy->SetEntryPointFromInterpreter<kVerifyNone>(
reinterpret_cast<EntryPointFromInterpreter*>(
- const_cast<byte*>(GetOatAddress(interpreter_code))));
+ const_cast<uint8_t*>(GetOatAddress(interpreter_code))));
}
}
}
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index bdf06148ec..e6a98d1143 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -38,7 +38,7 @@ namespace art {
class ImageWriter {
public:
ImageWriter(const CompilerDriver& compiler_driver, uintptr_t image_begin)
- : compiler_driver_(compiler_driver), image_begin_(reinterpret_cast<byte*>(image_begin)),
+ : compiler_driver_(compiler_driver), image_begin_(reinterpret_cast<uint8_t*>(image_begin)),
image_end_(0), image_roots_address_(0), oat_file_(NULL),
oat_data_begin_(NULL), interpreter_to_interpreter_bridge_offset_(0),
interpreter_to_compiled_code_bridge_offset_(0), jni_dlsym_lookup_offset_(0),
@@ -65,7 +65,7 @@ class ImageWriter {
return reinterpret_cast<mirror::Object*>(image_begin_ + GetImageOffset(object));
}
- byte* GetOatFileBegin() const {
+ uint8_t* GetOatFileBegin() const {
return image_begin_ + RoundUp(image_end_, kPageSize);
}
@@ -100,11 +100,11 @@ class ImageWriter {
mirror::Object* GetLocalAddress(mirror::Object* object) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
size_t offset = GetImageOffset(object);
- byte* dst = image_->Begin() + offset;
+ uint8_t* dst = image_->Begin() + offset;
return reinterpret_cast<mirror::Object*>(dst);
}
- const byte* GetOatAddress(uint32_t offset) const {
+ const uint8_t* GetOatAddress(uint32_t offset) const {
#if !defined(ART_USE_PORTABLE_COMPILER)
// With Quick, code is within the OatFile, as it is all in one
// .o ELF object. However, with Portable, the code is always in
@@ -171,10 +171,10 @@ class ImageWriter {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get quick code for non-resolution/imt_conflict/abstract method.
- const byte* GetQuickCode(mirror::ArtMethod* method, bool* quick_is_interpreted)
+ const uint8_t* GetQuickCode(mirror::ArtMethod* method, bool* quick_is_interpreted)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const byte* GetQuickEntryPoint(mirror::ArtMethod* method)
+ const uint8_t* GetQuickEntryPoint(mirror::ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Patches references in OatFile to expect runtime addresses.
@@ -183,7 +183,7 @@ class ImageWriter {
const CompilerDriver& compiler_driver_;
// Beginning target image address for the output image.
- byte* image_begin_;
+ uint8_t* image_begin_;
// Offset to the free space in image_.
size_t image_end_;
@@ -201,7 +201,7 @@ class ImageWriter {
std::vector<std::pair<mirror::Object*, uint32_t>> saved_hashes_;
// Beginning target oat address for the pointers from the output image to its oat file.
- const byte* oat_data_begin_;
+ const uint8_t* oat_data_begin_;
// Image bitmap which lets us know where the objects inside of the image reside.
std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> image_bitmap_;
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 78a228be47..f6795ea28c 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -428,10 +428,6 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
// 17. Finalize code generation
__ EmitSlowPaths();
size_t cs = __ CodeSize();
- if (instruction_set == kArm64) {
- // Test that we do not exceed the buffer size.
- CHECK(cs < arm64::kBufferSizeArm64);
- }
std::vector<uint8_t> managed_code(cs);
MemoryRegion code(&managed_code[0], managed_code.size());
__ FinalizeInstructions(code);
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 0b1f9e2cf9..3fcc369c5e 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -155,7 +155,7 @@ TEST_F(OatTest, WriteRead) {
ScopedObjectAccess soa(Thread::Current());
for (size_t i = 0; i < dex_file->NumClassDefs(); i++) {
const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
- const byte* class_data = dex_file->GetClassData(class_def);
+ const uint8_t* class_data = dex_file->GetClassData(class_def);
size_t num_virtual_methods = 0;
if (class_data != NULL) {
ClassDataItemIterator it(*dex_file, class_data);
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index e64d2ab279..be52f40a0b 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -1208,7 +1208,7 @@ bool OatWriter::VisitDexMethods(DexMethodVisitor* visitor) {
return false;
}
const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
- const byte* class_data = dex_file->GetClassData(class_def);
+ const uint8_t* class_data = dex_file->GetClassData(class_def);
if (class_data != NULL) { // ie not an empty class, such as a marker interface
ClassDataItemIterator it(*dex_file, class_data);
while (it.HasNextStaticField()) {
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 7d7b188362..e68cdb0b1d 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -22,7 +22,7 @@
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
#include "primitive.h"
-#include "utils/allocation.h"
+#include "utils/arena_object.h"
#include "utils/growable_array.h"
#include "nodes.h"
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 12973816ee..0e6b2031bb 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1289,9 +1289,10 @@ void InstructionCodeGeneratorARM::VisitInstanceFieldSet(HInstanceFieldSet* instr
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Unimplemented register type " << field_type;
-
+ UNREACHABLE();
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << field_type;
+ UNREACHABLE();
}
}
@@ -1349,9 +1350,10 @@ void InstructionCodeGeneratorARM::VisitInstanceFieldGet(HInstanceFieldGet* instr
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
-
+ UNREACHABLE();
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
}
}
@@ -1479,9 +1481,10 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
-
+ UNREACHABLE();
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
}
}
@@ -1576,9 +1579,10 @@ void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
-
+ UNREACHABLE();
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
}
}
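
The UNREACHABLE() added after each LOG(FATAL) in these code generators tells the compiler that control cannot continue past the fatal log, so the switch no longer looks like it falls through and no missing-return warning is emitted. A sketch of how such a macro is commonly defined; ART's actual definition lives in base/macros.h and may differ:

    // Common pattern for a "cannot get here" marker.
    #if defined(__GNUC__) || defined(__clang__)
    #define UNREACHABLE() __builtin_unreachable()
    #else
    #define UNREACHABLE() do { } while (true)
    #endif
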
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 114a5744df..15a199964b 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1282,9 +1282,10 @@ void InstructionCodeGeneratorX86::VisitInstanceFieldSet(HInstanceFieldSet* instr
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Unimplemented register type " << field_type;
-
+ UNREACHABLE();
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << field_type;
+ UNREACHABLE();
}
}
@@ -1354,9 +1355,10 @@ void InstructionCodeGeneratorX86::VisitInstanceFieldGet(HInstanceFieldGet* instr
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
-
+ UNREACHABLE();
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
}
}
@@ -1484,9 +1486,10 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
-
+ UNREACHABLE();
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
}
}
@@ -1637,9 +1640,10 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
-
+ UNREACHABLE();
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
}
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index d37ef06687..4b61546f2c 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1164,9 +1164,10 @@ void InstructionCodeGeneratorX86_64::VisitInstanceFieldSet(HInstanceFieldSet* in
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Unimplemented register type " << field_type;
-
+ UNREACHABLE();
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << field_type;
+ UNREACHABLE();
}
}
@@ -1218,9 +1219,10 @@ void InstructionCodeGeneratorX86_64::VisitInstanceFieldGet(HInstanceFieldGet* in
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
-
+ UNREACHABLE();
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
}
}
@@ -1345,9 +1347,10 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
-
+ UNREACHABLE();
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
}
}
@@ -1471,9 +1474,10 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
-
+ UNREACHABLE();
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
}
}
diff --git a/compiler/optimizing/graph_visualizer.h b/compiler/optimizing/graph_visualizer.h
index 6e2c6fd11f..f17ba3bbac 100644
--- a/compiler/optimizing/graph_visualizer.h
+++ b/compiler/optimizing/graph_visualizer.h
@@ -17,7 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_GRAPH_VISUALIZER_H_
#define ART_COMPILER_OPTIMIZING_GRAPH_VISUALIZER_H_
-#include "utils/allocation.h"
+#include "base/value_object.h"
namespace art {
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index ac44a42e76..dcf70f27b0 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -19,7 +19,8 @@
#include "base/bit_field.h"
#include "base/bit_vector.h"
-#include "utils/allocation.h"
+#include "base/value_object.h"
+#include "utils/arena_object.h"
#include "utils/growable_array.h"
namespace art {
@@ -94,7 +95,7 @@ class Location : public ValueObject {
static Location ConstantLocation(HConstant* constant) {
DCHECK(constant != nullptr);
- return Location(kConstant | reinterpret_cast<uword>(constant));
+ return Location(kConstant | reinterpret_cast<uintptr_t>(constant));
}
HConstant* GetConstant() const {
@@ -169,7 +170,7 @@ class Location : public ValueObject {
}
static Location StackSlot(intptr_t stack_index) {
- uword payload = EncodeStackIndex(stack_index);
+ uintptr_t payload = EncodeStackIndex(stack_index);
Location loc(kStackSlot, payload);
// Ensure that sign is preserved.
DCHECK_EQ(loc.GetStackIndex(), stack_index);
@@ -181,7 +182,7 @@ class Location : public ValueObject {
}
static Location DoubleStackSlot(intptr_t stack_index) {
- uword payload = EncodeStackIndex(stack_index);
+ uintptr_t payload = EncodeStackIndex(stack_index);
Location loc(kDoubleStackSlot, payload);
// Ensure that sign is preserved.
DCHECK_EQ(loc.GetStackIndex(), stack_index);
@@ -287,27 +288,27 @@ class Location : public ValueObject {
return PolicyField::Decode(GetPayload());
}
- uword GetEncoding() const {
+ uintptr_t GetEncoding() const {
return GetPayload();
}
private:
// Number of bits required to encode Kind value.
static constexpr uint32_t kBitsForKind = 4;
- static constexpr uint32_t kBitsForPayload = kWordSize * kBitsPerByte - kBitsForKind;
- static constexpr uword kLocationConstantMask = 0x3;
+ static constexpr uint32_t kBitsForPayload = kBitsPerIntPtrT - kBitsForKind;
+ static constexpr uintptr_t kLocationConstantMask = 0x3;
- explicit Location(uword value) : value_(value) {}
+ explicit Location(uintptr_t value) : value_(value) {}
- Location(Kind kind, uword payload)
+ Location(Kind kind, uintptr_t payload)
: value_(KindField::Encode(kind) | PayloadField::Encode(payload)) {}
- uword GetPayload() const {
+ uintptr_t GetPayload() const {
return PayloadField::Decode(value_);
}
typedef BitField<Kind, 0, kBitsForKind> KindField;
- typedef BitField<uword, kBitsForKind, kBitsForPayload> PayloadField;
+ typedef BitField<uintptr_t, kBitsForKind, kBitsForPayload> PayloadField;
// Layout for kUnallocated locations payload.
typedef BitField<Policy, 0, 3> PolicyField;
@@ -319,7 +320,7 @@ class Location : public ValueObject {
// Location either contains kind and payload fields or a tagged handle for
// a constant locations. Values of enumeration Kind are selected in such a
// way that none of them can be interpreted as a kConstant tag.
- uword value_;
+ uintptr_t value_;
};
class RegisterSet : public ValueObject {
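
ConstantLocation tags the constant's pointer by OR-ing kind bits into its low bits and relies on pointer alignment to keep those bits free; kLocationConstantMask (0x3) strips the tag again. A hedged standalone sketch of that tagging idea; the struct and helper names below are illustrative, not ART's:

#include <cassert>
#include <cstdint>

struct HConstant { int value; };

// Illustrative tag; the diff uses kLocationConstantMask = 0x3 with a kConstant kind value.
constexpr uintptr_t kConstantTag = 0x3;

uintptr_t MakeConstantLocation(HConstant* constant) {
  // Works because HConstant objects are at least 4-byte aligned, so the low bits are free.
  assert((reinterpret_cast<uintptr_t>(constant) & kConstantTag) == 0);
  return kConstantTag | reinterpret_cast<uintptr_t>(constant);
}

HConstant* GetConstant(uintptr_t encoded) {
  return reinterpret_cast<HConstant*>(encoded & ~kConstantTag);
}

int main() {
  HConstant c{42};
  return GetConstant(MakeConstantLocation(&c))->value == 42 ? 0 : 1;
}
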
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 41713a401b..677a4f8591 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -20,7 +20,7 @@
#include "locations.h"
#include "offsets.h"
#include "primitive.h"
-#include "utils/allocation.h"
+#include "utils/arena_object.h"
#include "utils/arena_bit_vector.h"
#include "utils/growable_array.h"
diff --git a/compiler/optimizing/parallel_move_resolver.h b/compiler/optimizing/parallel_move_resolver.h
index fcc1de6dc9..309425ef4d 100644
--- a/compiler/optimizing/parallel_move_resolver.h
+++ b/compiler/optimizing/parallel_move_resolver.h
@@ -17,7 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_PARALLEL_MOVE_RESOLVER_H_
#define ART_COMPILER_OPTIMIZING_PARALLEL_MOVE_RESOLVER_H_
-#include "utils/allocation.h"
+#include "base/value_object.h"
#include "utils/growable_array.h"
namespace art {
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 0ea11ad04b..5f74c33643 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -18,9 +18,9 @@
#define ART_COMPILER_OPTIMIZING_STACK_MAP_STREAM_H_
#include "base/bit_vector.h"
+#include "base/value_object.h"
#include "memory_region.h"
#include "stack_map.h"
-#include "utils/allocation.h"
#include "utils/growable_array.h"
namespace art {
diff --git a/compiler/output_stream_test.cc b/compiler/output_stream_test.cc
index 315ca09e59..bba98926b3 100644
--- a/compiler/output_stream_test.cc
+++ b/compiler/output_stream_test.cc
@@ -90,7 +90,7 @@ TEST_F(OutputStreamTest, Buffered) {
TEST_F(OutputStreamTest, Vector) {
std::vector<uint8_t> output;
- VectorOutputStream output_stream("test vector output", output);
+ VectorOutputStream output_stream("test vector output", &output);
SetOutputStream(output_stream);
GenerateTestOutput();
CheckTestOutput(output);
diff --git a/compiler/utils/allocation.h b/compiler/utils/arena_object.h
index b0947cac68..50909f7532 100644
--- a/compiler/utils/allocation.h
+++ b/compiler/utils/arena_object.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_COMPILER_UTILS_ALLOCATION_H_
-#define ART_COMPILER_UTILS_ALLOCATION_H_
+#ifndef ART_COMPILER_UTILS_ARENA_OBJECT_H_
+#define ART_COMPILER_UTILS_ARENA_OBJECT_H_
#include "arena_allocator.h"
#include "base/logging.h"
@@ -34,17 +34,6 @@ class ArenaObject {
}
};
-class ValueObject {
- public:
- void* operator new(size_t size) {
- LOG(FATAL) << "UNREACHABLE";
- abort();
- }
- void operator delete(void*, size_t) {
- LOG(FATAL) << "UNREACHABLE";
- }
-};
-
} // namespace art
-#endif // ART_COMPILER_UTILS_ALLOCATION_H_
+#endif // ART_COMPILER_UTILS_ARENA_OBJECT_H_
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 637a1ff297..b430c7ee97 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -386,7 +386,7 @@ bool Address::CanHoldStoreOffsetThumb(StoreOperandType type, int offset) {
void ArmAssembler::Pad(uint32_t bytes) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
for (uint32_t i = 0; i < bytes; ++i) {
- buffer_.Emit<byte>(0);
+ buffer_.Emit<uint8_t>(0);
}
}
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index 54965f678f..14d48b7737 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -600,7 +600,7 @@ class ArmAssembler : public Assembler {
virtual void Ror(Register rd, Register rm, Register rn, bool setcc = false,
Condition cond = AL) = 0;
- static bool IsInstructionForExceptionHandling(uword pc);
+ static bool IsInstructionForExceptionHandling(uintptr_t pc);
virtual void Bind(Label* label) = 0;
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index 7f9094d664..c89fd04e9e 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -273,7 +273,7 @@ class Arm32Assembler FINAL : public ArmAssembler {
Condition cond = AL) OVERRIDE;
- static bool IsInstructionForExceptionHandling(uword pc);
+ static bool IsInstructionForExceptionHandling(uintptr_t pc);
// Emit data (e.g. encoded instruction or immediate) to the
// instruction stream.
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index ee33bf23c9..9e7d394555 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -303,7 +303,7 @@ class Thumb2Assembler FINAL : public ArmAssembler {
Condition cond = AL) OVERRIDE;
- static bool IsInstructionForExceptionHandling(uword pc);
+ static bool IsInstructionForExceptionHandling(uintptr_t pc);
// Emit data (e.g. encoded instruction or immediate) to the
// instruction stream.
diff --git a/compiler/utils/arm/constants_arm.h b/compiler/utils/arm/constants_arm.h
index 3e4cd43dc3..092c89192d 100644
--- a/compiler/utils/arm/constants_arm.h
+++ b/compiler/utils/arm/constants_arm.h
@@ -223,7 +223,7 @@ typedef uint16_t RegList;
// Example: Test whether the instruction at ptr does set the condition code
// bits.
//
-// bool InstructionSetsConditionCodes(byte* ptr) {
+// bool InstructionSetsConditionCodes(uint8_t* ptr) {
// Instr* instr = Instr::At(ptr);
// int type = instr->TypeField();
// return ((type == 0) || (type == 1)) && instr->HasS();
@@ -435,7 +435,7 @@ class Instr {
// reference to an instruction is to convert a pointer. There is no way
// to allocate or create instances of class Instr.
// Use the At(pc) function to create references to Instr.
- static Instr* At(uword pc) { return reinterpret_cast<Instr*>(pc); }
+ static Instr* At(uintptr_t pc) { return reinterpret_cast<Instr*>(pc); }
Instr* Next() { return this + kInstrSize; }
private:
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index c82b4f0f50..25e02c35da 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -42,12 +42,12 @@ void Arm64Assembler::EmitSlowPaths() {
}
size_t Arm64Assembler::CodeSize() const {
- return ___ SizeOfCodeGenerated();
+ return vixl_masm_->BufferCapacity() - vixl_masm_->RemainingBufferSpace();
}
void Arm64Assembler::FinalizeInstructions(const MemoryRegion& region) {
// Copy the instructions from the buffer.
- MemoryRegion from(reinterpret_cast<void*>(vixl_buf_), CodeSize());
+ MemoryRegion from(vixl_masm_->GetStartAddress<void*>(), CodeSize());
region.CopyFrom(0, from);
}
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index bf89d24bd5..ef833345f7 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -59,12 +59,12 @@ class Arm64Exception;
class Arm64Assembler FINAL : public Assembler {
public:
- Arm64Assembler() : vixl_buf_(new byte[kBufferSizeArm64]),
- vixl_masm_(new vixl::MacroAssembler(vixl_buf_, kBufferSizeArm64)) {}
+ // We indicate the size of the initial code generation buffer to the VIXL
+ // assembler. From there it will automatically manage the buffer.
+ Arm64Assembler() : vixl_masm_(new vixl::MacroAssembler(kArm64BaseBufferSize)) {}
virtual ~Arm64Assembler() {
delete vixl_masm_;
- delete[] vixl_buf_;
}
// Emit slow paths queued during assembly.
@@ -213,11 +213,8 @@ class Arm64Assembler FINAL : public Assembler {
void AddConstant(Register rd, int32_t value, vixl::Condition cond = vixl::al);
void AddConstant(Register rd, Register rn, int32_t value, vixl::Condition cond = vixl::al);
- // Vixl buffer.
- byte* vixl_buf_;
-
// Vixl assembler.
- vixl::MacroAssembler* vixl_masm_;
+ vixl::MacroAssembler* const vixl_masm_;
// List of exception blocks to generate at the end of the code cache.
std::vector<Arm64Exception*> exception_blocks_;
diff --git a/compiler/utils/arm64/constants_arm64.h b/compiler/utils/arm64/constants_arm64.h
index 0cbbb1eeff..ffb54d322f 100644
--- a/compiler/utils/arm64/constants_arm64.h
+++ b/compiler/utils/arm64/constants_arm64.h
@@ -31,8 +31,7 @@ namespace arm64 {
constexpr unsigned int kJniRefSpillRegsSize = 11;
-// Vixl buffer size.
-constexpr size_t kBufferSizeArm64 = 4096*2;
+constexpr size_t kArm64BaseBufferSize = 4096;
} // namespace arm64
} // namespace art
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index e3045e1a7b..8a1289dc17 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -30,8 +30,8 @@
namespace art {
-static byte* NewContents(size_t capacity) {
- return new byte[capacity];
+static uint8_t* NewContents(size_t capacity) {
+ return new uint8_t[capacity];
}
@@ -85,7 +85,7 @@ void AssemblerBuffer::ExtendCapacity() {
size_t new_capacity = std::min(old_capacity * 2, old_capacity + 1 * MB);
// Allocate the new data area and copy contents of the old one to it.
- byte* new_contents = NewContents(new_capacity);
+ uint8_t* new_contents = NewContents(new_capacity);
memmove(reinterpret_cast<void*>(new_contents),
reinterpret_cast<void*>(contents_),
old_size);
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 4addfa0946..91b8d8ab9a 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -56,19 +56,19 @@ namespace x86_64 {
class ExternalLabel {
public:
- ExternalLabel(const char* name, uword address)
+ ExternalLabel(const char* name, uintptr_t address)
: name_(name), address_(address) {
DCHECK(name != nullptr);
}
const char* name() const { return name_; }
- uword address() const {
+ uintptr_t address() const {
return address_;
}
private:
const char* name_;
- const uword address_;
+ const uintptr_t address_;
};
class Label {
@@ -84,12 +84,12 @@ class Label {
// for unused labels.
int Position() const {
CHECK(!IsUnused());
- return IsBound() ? -position_ - kPointerSize : position_ - kPointerSize;
+ return IsBound() ? -position_ - sizeof(void*) : position_ - sizeof(void*);
}
int LinkPosition() const {
CHECK(IsLinked());
- return position_ - kPointerSize;
+ return position_ - sizeof(void*);
}
bool IsBound() const { return position_ < 0; }
@@ -105,13 +105,13 @@ class Label {
void BindTo(int position) {
CHECK(!IsBound());
- position_ = -position - kPointerSize;
+ position_ = -position - sizeof(void*);
CHECK(IsBound());
}
void LinkTo(int position) {
CHECK(!IsBound());
- position_ = position + kPointerSize;
+ position_ = position + sizeof(void*);
CHECK(IsLinked());
}
@@ -236,7 +236,7 @@ class AssemblerBuffer {
return cursor_ - contents_;
}
- byte* contents() const { return contents_; }
+ uint8_t* contents() const { return contents_; }
// Copy the assembled instructions into the specified memory block
// and apply all fixups.
@@ -316,9 +316,9 @@ class AssemblerBuffer {
// for a single, fast space check per instruction.
static const int kMinimumGap = 32;
- byte* contents_;
- byte* cursor_;
- byte* limit_;
+ uint8_t* contents_;
+ uint8_t* cursor_;
+ uint8_t* limit_;
AssemblerFixup* fixup_;
#ifndef NDEBUG
bool fixups_processed_;
@@ -327,8 +327,8 @@ class AssemblerBuffer {
// Head of linked list of slow paths
SlowPath* slow_path_;
- byte* cursor() const { return cursor_; }
- byte* limit() const { return limit_; }
+ uint8_t* cursor() const { return cursor_; }
+ uint8_t* limit() const { return limit_; }
size_t Capacity() const {
CHECK_GE(limit_, contents_);
return (limit_ - contents_) + kMinimumGap;
@@ -340,7 +340,7 @@ class AssemblerBuffer {
// Compute the limit based on the data area and the capacity. See
// description of kMinimumGap for the reasoning behind the value.
- static byte* ComputeLimit(byte* data, size_t capacity) {
+ static uint8_t* ComputeLimit(uint8_t* data, size_t capacity) {
return data + capacity - kMinimumGap;
}
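
ExtendCapacity doubles the buffer but caps growth at 1 MB per step, and ComputeLimit keeps kMinimumGap bytes of slack below the true end so EnsureCapacity can do one cheap check per emitted instruction. A small sketch of those two policies, with the constants taken from the hunks above and everything else illustrative:

#include <algorithm>
#include <cstddef>
#include <cstdint>

constexpr size_t kMB = 1024 * 1024;
constexpr int kMinimumGap = 32;   // Slack kept so EnsureCapacity checks once per instruction.

// Growth policy from AssemblerBuffer::ExtendCapacity: double, but grow by at most 1 MB.
size_t NewCapacity(size_t old_capacity) {
  return std::min(old_capacity * 2, old_capacity + kMB);
}

// Mirrors ComputeLimit: the usable limit sits kMinimumGap bytes below the data area's end.
uint8_t* ComputeLimit(uint8_t* data, size_t capacity) {
  return data + capacity - kMinimumGap;
}

int main() {
  return NewCapacity(4 * kMB) == 5 * kMB ? 0 : 1;   // Large buffers grow by 1 MB, not 2x.
}
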
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 3ff24b7a27..4ddf9793fd 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1291,7 +1291,7 @@ void X86Assembler::LoadDoubleConstant(XmmRegister dst, double value) {
pushl(Immediate(High32Bits(constant)));
pushl(Immediate(Low32Bits(constant)));
movsd(dst, Address(ESP, 0));
- addl(ESP, Immediate(2 * kWordSize));
+ addl(ESP, Immediate(2 * sizeof(intptr_t)));
}
@@ -1303,7 +1303,7 @@ void X86Assembler::FloatNegate(XmmRegister f) {
uint32_t d;
} float_negate_constant __attribute__((aligned(16))) =
{ 0x80000000, 0x00000000, 0x80000000, 0x00000000 };
- xorps(f, Address::Absolute(reinterpret_cast<uword>(&float_negate_constant)));
+ xorps(f, Address::Absolute(reinterpret_cast<uintptr_t>(&float_negate_constant)));
}
@@ -1313,7 +1313,7 @@ void X86Assembler::DoubleNegate(XmmRegister d) {
uint64_t b;
} double_negate_constant __attribute__((aligned(16))) =
{0x8000000000000000LL, 0x8000000000000000LL};
- xorpd(d, Address::Absolute(reinterpret_cast<uword>(&double_negate_constant)));
+ xorpd(d, Address::Absolute(reinterpret_cast<uintptr_t>(&double_negate_constant)));
}
@@ -1323,7 +1323,7 @@ void X86Assembler::DoubleAbs(XmmRegister reg) {
uint64_t b;
} double_abs_constant __attribute__((aligned(16))) =
{0x7FFFFFFFFFFFFFFFLL, 0x7FFFFFFFFFFFFFFFLL};
- andpd(reg, Address::Absolute(reinterpret_cast<uword>(&double_abs_constant)));
+ andpd(reg, Address::Absolute(reinterpret_cast<uintptr_t>(&double_abs_constant)));
}
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 1f6f7e6957..c7eada34f5 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -116,8 +116,8 @@ class Operand {
}
private:
- byte length_;
- byte encoding_[6];
+ uint8_t length_;
+ uint8_t encoding_[6];
explicit Operand(Register reg) { SetModRM(3, reg); }
@@ -192,7 +192,7 @@ class Address : public Operand {
}
}
- static Address Absolute(uword addr) {
+ static Address Absolute(uintptr_t addr) {
Address result;
result.SetModRM(0, EBP);
result.SetDisp32(addr);
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 705b639ad6..75823e336b 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1490,7 +1490,7 @@ void X86_64Assembler::LoadDoubleConstant(XmmRegister dst, double value) {
pushq(Immediate(High32Bits(constant)));
pushq(Immediate(Low32Bits(constant)));
movsd(dst, Address(CpuRegister(RSP), 0));
- addq(CpuRegister(RSP), Immediate(2 * kWordSize));
+ addq(CpuRegister(RSP), Immediate(2 * sizeof(intptr_t)));
}
@@ -1502,7 +1502,7 @@ void X86_64Assembler::FloatNegate(XmmRegister f) {
uint32_t d;
} float_negate_constant __attribute__((aligned(16))) =
{ 0x80000000, 0x00000000, 0x80000000, 0x00000000 };
- xorps(f, Address::Absolute(reinterpret_cast<uword>(&float_negate_constant)));
+ xorps(f, Address::Absolute(reinterpret_cast<uintptr_t>(&float_negate_constant)));
}
@@ -1512,7 +1512,7 @@ void X86_64Assembler::DoubleNegate(XmmRegister d) {
uint64_t b;
} double_negate_constant __attribute__((aligned(16))) =
{0x8000000000000000LL, 0x8000000000000000LL};
- xorpd(d, Address::Absolute(reinterpret_cast<uword>(&double_negate_constant)));
+ xorpd(d, Address::Absolute(reinterpret_cast<uintptr_t>(&double_negate_constant)));
}
@@ -1522,7 +1522,7 @@ void X86_64Assembler::DoubleAbs(XmmRegister reg) {
uint64_t b;
} double_abs_constant __attribute__((aligned(16))) =
{0x7FFFFFFFFFFFFFFFLL, 0x7FFFFFFFFFFFFFFFLL};
- andpd(reg, Address::Absolute(reinterpret_cast<uword>(&double_abs_constant)));
+ andpd(reg, Address::Absolute(reinterpret_cast<uintptr_t>(&double_abs_constant)));
}
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 268f72bb66..1d9eba446a 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -227,7 +227,7 @@ class Address : public Operand {
}
// If no_rip is true then the Absolute address isn't RIP relative.
- static Address Absolute(uword addr, bool no_rip = false) {
+ static Address Absolute(uintptr_t addr, bool no_rip = false) {
Address result;
if (no_rip) {
result.SetModRM(0, CpuRegister(RSP));
diff --git a/compiler/vector_output_stream.cc b/compiler/vector_output_stream.cc
index e5ff729036..3d33673e1a 100644
--- a/compiler/vector_output_stream.cc
+++ b/compiler/vector_output_stream.cc
@@ -20,8 +20,8 @@
namespace art {
-VectorOutputStream::VectorOutputStream(const std::string& location, std::vector<uint8_t>& vector)
- : OutputStream(location), offset_(vector.size()), vector_(vector) {}
+VectorOutputStream::VectorOutputStream(const std::string& location, std::vector<uint8_t>* vector)
+ : OutputStream(location), offset_(vector->size()), vector_(vector) {}
off_t VectorOutputStream::Seek(off_t offset, Whence whence) {
CHECK(whence == kSeekSet || whence == kSeekCurrent || whence == kSeekEnd) << whence;
@@ -36,7 +36,7 @@ off_t VectorOutputStream::Seek(off_t offset, Whence whence) {
break;
}
case kSeekEnd: {
- new_offset = vector_.size() + offset;
+ new_offset = vector_->size() + offset;
break;
}
}
diff --git a/compiler/vector_output_stream.h b/compiler/vector_output_stream.h
index 09daa12e02..3c5877c0bd 100644
--- a/compiler/vector_output_stream.h
+++ b/compiler/vector_output_stream.h
@@ -25,21 +25,21 @@
namespace art {
-class VectorOutputStream : public OutputStream {
+class VectorOutputStream FINAL : public OutputStream {
public:
- VectorOutputStream(const std::string& location, std::vector<uint8_t>& vector);
+ VectorOutputStream(const std::string& location, std::vector<uint8_t>* vector);
virtual ~VectorOutputStream() {}
bool WriteFully(const void* buffer, size_t byte_count) {
- if (static_cast<size_t>(offset_) == vector_.size()) {
+ if (static_cast<size_t>(offset_) == vector_->size()) {
const uint8_t* start = reinterpret_cast<const uint8_t*>(buffer);
- vector_.insert(vector_.end(), &start[0], &start[byte_count]);
+ vector_->insert(vector_->end(), &start[0], &start[byte_count]);
offset_ += byte_count;
} else {
off_t new_offset = offset_ + byte_count;
EnsureCapacity(new_offset);
- memcpy(&vector_[offset_], buffer, byte_count);
+ memcpy(&(*vector_)[offset_], buffer, byte_count);
offset_ = new_offset;
}
return true;
@@ -49,13 +49,13 @@ class VectorOutputStream : public OutputStream {
private:
void EnsureCapacity(off_t new_offset) {
- if (new_offset > static_cast<off_t>(vector_.size())) {
- vector_.resize(new_offset);
+ if (new_offset > static_cast<off_t>(vector_->size())) {
+ vector_->resize(new_offset);
}
}
off_t offset_;
- std::vector<uint8_t>& vector_;
+ std::vector<uint8_t>* const vector_;
DISALLOW_COPY_AND_ASSIGN(VectorOutputStream);
};
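
Passing the backing vector by pointer (and holding it as a const pointer member) makes the aliasing explicit at call sites, as the output_stream_test change above shows. A condensed, self-contained stand-in for the append path of the new API; the Mini-prefixed class is illustrative, not the real one:

#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

class MiniVectorOutputStream {
 public:
  MiniVectorOutputStream(const std::string& location, std::vector<uint8_t>* vector)
      : location_(location), vector_(vector) {}

  bool WriteFully(const void* buffer, size_t byte_count) {
    const uint8_t* start = reinterpret_cast<const uint8_t*>(buffer);
    vector_->insert(vector_->end(), start, start + byte_count);  // Append at the end.
    return true;
  }

 private:
  const std::string location_;
  std::vector<uint8_t>* const vector_;   // Pointer member, const after construction.
};

int main() {
  std::vector<uint8_t> output;
  MiniVectorOutputStream stream("test vector output", &output);  // Pointer, not reference.
  const uint8_t bytes[] = {1, 2, 3, 4};
  stream.WriteFully(bytes, sizeof(bytes));
  return output.size() == 4 ? 0 : 1;
}
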
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index 6f8e08b3ba..ac883fe4e3 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -407,11 +407,11 @@ uint64_t AdvSIMDExpand(uint32_t op, uint32_t cmode, uint32_t imm8) {
}
uint64_t imm = imm8;
switch (cmode321) {
- case 3: imm <<= 8; // Fall through.
- case 2: imm <<= 8; // Fall through.
- case 1: imm <<= 8; // Fall through.
+ case 3: imm <<= 8; FALLTHROUGH_INTENDED;
+ case 2: imm <<= 8; FALLTHROUGH_INTENDED;
+ case 1: imm <<= 8; FALLTHROUGH_INTENDED;
case 0: return static_cast<int64_t>((imm << 32) | imm);
- case 5: imm <<= 8; // Fall through.
+ case 5: imm <<= 8; FALLTHROUGH_INTENDED;
case 4: return static_cast<int64_t>((imm << 48) | (imm << 32) | (imm << 16) | imm);
case 6:
imm = ((imm + 1u) << ((cmode & 1) != 0 ? 16 : 8)) - 1u; // Add 8 or 16 ones.
@@ -1196,7 +1196,7 @@ size_t DisassemblerArm::DumpThumb32(std::ostream& os, const uint8_t* instr_ptr)
}
break;
}
- // Else deliberate fall-through to B.
+ FALLTHROUGH_INTENDED; // Else deliberate fall-through to B.
case 1: case 3: {
// B
// |111|11|1|0000|000000|11|1 |1|1 |10000000000|
@@ -1597,6 +1597,7 @@ size_t DisassemblerArm::DumpThumb32(std::ostream& os, const uint8_t* instr_ptr)
}
}
}
+ break;
default:
break;
}
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index 195c45f79d..63a74c7240 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -412,7 +412,7 @@ DISASSEMBLER_ENTRY(cmp,
break;
case 0x2E:
opcode << "u";
- // FALLTHROUGH
+ FALLTHROUGH_INTENDED;
case 0x2F:
if (prefix[2] == 0x66) {
opcode << "comisd";
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index d5e766f69d..1f2c0aa1cc 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -120,10 +120,12 @@ const char* image_roots_descriptions_[] = {
"kClassRoots",
};
-class OatSymbolizer : public CodeOutput {
+class OatSymbolizer FINAL : public CodeOutput {
public:
- explicit OatSymbolizer(const OatFile* oat_file, std::string& output_name) :
- oat_file_(oat_file), builder_(nullptr), elf_output_(nullptr), output_name_(output_name) {}
+ explicit OatSymbolizer(const OatFile* oat_file, const std::string& output_name) :
+ oat_file_(oat_file), builder_(nullptr), elf_output_(nullptr),
+ output_name_(output_name.empty() ? "symbolized.oat" : output_name) {
+ }
bool Init() {
Elf32_Word oat_data_size = oat_file_->GetOatHeader().GetExecutableOffset();
@@ -131,9 +133,6 @@ class OatSymbolizer : public CodeOutput {
uint32_t diff = static_cast<uint32_t>(oat_file_->End() - oat_file_->Begin());
uint32_t oat_exec_size = diff - oat_data_size;
- if (output_name_.empty()) {
- output_name_ = "symbolized.oat";
- }
elf_output_ = OS::CreateEmptyFile(output_name_.c_str());
builder_.reset(new ElfBuilder<Elf32_Word, Elf32_Sword, Elf32_Addr, Elf32_Dyn,
@@ -220,7 +219,7 @@ class OatSymbolizer : public CodeOutput {
void WalkOatClass(const OatFile::OatClass& oat_class, const DexFile& dex_file,
const DexFile::ClassDef& class_def, Callback callback) {
- const byte* class_data = dex_file.GetClassData(class_def);
+ const uint8_t* class_data = dex_file.GetClassData(class_def);
if (class_data == nullptr) { // empty class such as a marker interface?
return;
}
@@ -307,11 +306,11 @@ class OatSymbolizer : public CodeOutput {
}
ElfSymtabBuilder<Elf32_Word, Elf32_Sword, Elf32_Addr,
- Elf32_Sym, Elf32_Shdr>* symtab = &builder_->symtab_builder_;
+ Elf32_Sym, Elf32_Shdr>* symtab = builder_->GetSymtabBuilder();
- symtab->AddSymbol(pretty_name, &builder_->text_builder_, oat_method.GetCodeOffset() -
- oat_file_->GetOatHeader().GetExecutableOffset(), true,
- oat_method.GetQuickCodeSize(), STB_GLOBAL, STT_FUNC);
+ symtab->AddSymbol(pretty_name, &builder_->GetTextBuilder(),
+ oat_method.GetCodeOffset() - oat_file_->GetOatHeader().GetExecutableOffset(),
+ true, oat_method.GetQuickCodeSize(), STB_GLOBAL, STT_FUNC);
}
}
@@ -340,7 +339,7 @@ class OatSymbolizer : public CodeOutput {
Elf32_Sym, Elf32_Ehdr, Elf32_Phdr, Elf32_Shdr> > builder_;
File* elf_output_;
std::unordered_map<uint32_t, uint32_t> state_;
- std::string output_name_;
+ const std::string output_name_;
};
class OatDumperOptions {
@@ -483,8 +482,8 @@ class OatDumper {
}
size_t ComputeSize(const void* oat_data) {
- if (reinterpret_cast<const byte*>(oat_data) < oat_file_.Begin() ||
- reinterpret_cast<const byte*>(oat_data) > oat_file_.End()) {
+ if (reinterpret_cast<const uint8_t*>(oat_data) < oat_file_.Begin() ||
+ reinterpret_cast<const uint8_t*>(oat_data) > oat_file_.End()) {
return 0; // Address not in oat file
}
uintptr_t begin_offset = reinterpret_cast<uintptr_t>(oat_data) -
@@ -544,7 +543,7 @@ class OatDumper {
class_def_index++) {
const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
const OatFile::OatClass oat_class = oat_dex_file->GetOatClass(class_def_index);
- const byte* class_data = dex_file->GetClassData(class_def);
+ const uint8_t* class_data = dex_file->GetClassData(class_def);
if (class_data != nullptr) {
ClassDataItemIterator it(*dex_file, class_data);
SkipAllFields(it);
@@ -632,7 +631,7 @@ class OatDumper {
bool DumpOatClass(std::ostream& os, const OatFile::OatClass& oat_class, const DexFile& dex_file,
const DexFile::ClassDef& class_def) {
bool success = true;
- const byte* class_data = dex_file.GetClassData(class_def);
+ const uint8_t* class_data = dex_file.GetClassData(class_def);
if (class_data == nullptr) { // empty class such as a marker interface?
os << std::flush;
return success;
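
Moving the empty-name fallback into the constructor's initializer list lets output_name_ become const: the default is chosen once at construction instead of being patched in Init(). A minimal sketch of that pattern; the class here is illustrative, not the real OatSymbolizer:

#include <string>

class Symbolizer {
 public:
  explicit Symbolizer(const std::string& output_name)
      : output_name_(output_name.empty() ? "symbolized.oat" : output_name) {}

  const std::string& output_name() const { return output_name_; }

 private:
  const std::string output_name_;   // Immutable after construction.
};
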
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index bbaf0e4492..fbb36f3e99 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -385,7 +385,7 @@ mirror::Object* PatchOat::RelocatedAddressOf(mirror::Object* obj) {
if (obj == nullptr) {
return nullptr;
} else {
- return reinterpret_cast<mirror::Object*>(reinterpret_cast<byte*>(obj) + delta_);
+ return reinterpret_cast<mirror::Object*>(reinterpret_cast<uint8_t*>(obj) + delta_);
}
}
@@ -608,7 +608,7 @@ bool PatchOat::PatchTextSection(ElfFileImpl* oat_file) {
patch_loc_t* patches_end = patches + (patches_sec->sh_size / sizeof(patch_loc_t));
auto oat_text_sec = oat_file->FindSectionByName(".text");
CHECK(oat_text_sec != nullptr);
- byte* to_patch = oat_file->Begin() + oat_text_sec->sh_offset;
+ uint8_t* to_patch = oat_file->Begin() + oat_text_sec->sh_offset;
uintptr_t to_patch_end = reinterpret_cast<uintptr_t>(to_patch) + oat_text_sec->sh_size;
for (; patches < patches_end; patches++) {
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 252e89e938..52a2a881f2 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -937,12 +937,6 @@ ENTRY art_quick_do_long_jump
END art_quick_do_long_jump
/*
- * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
- * failure.
- */
-TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
-
- /*
* Entry from managed code that calls artLockObjectFromCode, may block for GC. x0 holds the
* possibly null object to lock.
*
@@ -1248,6 +1242,12 @@ END \name
.endm
/*
+ * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
+ * failure.
+ */
+TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
+
+ /*
* Entry from managed code when uninitialized static storage, this stub will run the class
* initializer and deliver the exception on error. On success the static storage base is
* returned.
diff --git a/runtime/arch/x86/context_x86.cc b/runtime/arch/x86/context_x86.cc
index 37049cfd7b..a7beaa9196 100644
--- a/runtime/arch/x86/context_x86.cc
+++ b/runtime/arch/x86/context_x86.cc
@@ -81,7 +81,7 @@ void X86Context::DoLongJump() {
gprs[kNumberOfCpuRegisters - i - 1] = gprs_[i] != nullptr ? *gprs_[i] : X86Context::kBadGprBase + i;
}
// We want to load the stack pointer one slot below so that the ret will pop eip.
- uintptr_t esp = gprs[kNumberOfCpuRegisters - ESP - 1] - kWordSize;
+ uintptr_t esp = gprs[kNumberOfCpuRegisters - ESP - 1] - sizeof(intptr_t);
gprs[kNumberOfCpuRegisters] = esp;
*(reinterpret_cast<uintptr_t*>(esp)) = eip_;
__asm__ __volatile__(
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 17310b6d95..6715fd318d 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -113,7 +113,7 @@ static uint32_t GetInstructionSize(const uint8_t* pc) {
// Group 3
case 0x66:
operand_size_prefix = true;
- // fallthrough
+ FALLTHROUGH_INTENDED;
// Group 1
case 0xf0:
diff --git a/runtime/arch/x86_64/context_x86_64.cc b/runtime/arch/x86_64/context_x86_64.cc
index 7699eaf9d4..79d0666ddc 100644
--- a/runtime/arch/x86_64/context_x86_64.cc
+++ b/runtime/arch/x86_64/context_x86_64.cc
@@ -129,7 +129,7 @@ void X86_64Context::DoLongJump() {
}
// We want to load the stack pointer one slot below so that the ret will pop eip.
- uintptr_t rsp = gprs[kNumberOfCpuRegisters - RSP - 1] - kWordSize;
+ uintptr_t rsp = gprs[kNumberOfCpuRegisters - RSP - 1] - sizeof(intptr_t);
gprs[kNumberOfCpuRegisters] = rsp;
*(reinterpret_cast<uintptr_t*>(rsp)) = rip_;
diff --git a/runtime/base/bit_field.h b/runtime/base/bit_field.h
index e041bd0d85..fd65d500aa 100644
--- a/runtime/base/bit_field.h
+++ b/runtime/base/bit_field.h
@@ -22,7 +22,7 @@
namespace art {
-static const uword kUwordOne = 1U;
+static constexpr uintptr_t kUintPtrTOne = 1U;
// BitField is a template for encoding and decoding a bit field inside
// an unsigned machine word.
@@ -31,18 +31,18 @@ class BitField {
public:
// Tells whether the provided value fits into the bit field.
static bool IsValid(T value) {
- return (static_cast<uword>(value) & ~((kUwordOne << size) - 1)) == 0;
+ return (static_cast<uintptr_t>(value) & ~((kUintPtrTOne << size) - 1)) == 0;
}
// Returns a uword mask of the bit field.
- static uword Mask() {
- return (kUwordOne << size) - 1;
+ static uintptr_t Mask() {
+ return (kUintPtrTOne << size) - 1;
}
// Returns a uword mask of the bit field which can be applied directly to
// the raw unshifted bits.
- static uword MaskInPlace() {
- return ((kUwordOne << size) - 1) << position;
+ static uintptr_t MaskInPlace() {
+ return ((kUintPtrTOne << size) - 1) << position;
}
// Returns the shift count needed to right-shift the bit field to
@@ -57,22 +57,22 @@ class BitField {
}
// Returns a uword with the bit field value encoded.
- static uword Encode(T value) {
+ static uintptr_t Encode(T value) {
DCHECK(IsValid(value));
- return static_cast<uword>(value) << position;
+ return static_cast<uintptr_t>(value) << position;
}
// Extracts the bit field from the value.
- static T Decode(uword value) {
- return static_cast<T>((value >> position) & ((kUwordOne << size) - 1));
+ static T Decode(uintptr_t value) {
+ return static_cast<T>((value >> position) & ((kUintPtrTOne << size) - 1));
}
// Returns a uword with the bit field value encoded based on the
// original value. Only the bits corresponding to this bit field
// will be changed.
- static uword Update(T value, uword original) {
+ static uintptr_t Update(T value, uintptr_t original) {
DCHECK(IsValid(value));
- return (static_cast<uword>(value) << position) |
+ return (static_cast<uintptr_t>(value) << position) |
(~MaskInPlace() & original);
}
};
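
BitField packs a value of type T into size bits at a given position of a uintptr_t; the rename from uword changes only the storage type. A condensed, hand-specialized sketch of what Encode/Decode/Update compute (not the real template):

#include <cstdint>

static constexpr uintptr_t kOne = 1U;

template <typename T, int position, int size>
struct BitField {
  static uintptr_t Encode(T value) { return static_cast<uintptr_t>(value) << position; }
  static T Decode(uintptr_t word) {
    return static_cast<T>((word >> position) & ((kOne << size) - 1));
  }
  static uintptr_t Update(T value, uintptr_t original) {
    uintptr_t mask_in_place = ((kOne << size) - 1) << position;
    return (static_cast<uintptr_t>(value) << position) | (~mask_in_place & original);
  }
};

int main() {
  using Nibble = BitField<unsigned, 4, 4>;   // 4 bits stored at bit position 4.
  uintptr_t w = Nibble::Encode(0xA);         // 0xA0
  w = Nibble::Update(0x5, w | 0x1);          // Only bits 4..7 change; bit 0 is preserved.
  return (Nibble::Decode(w) == 0x5 && (w & 0x1) == 0x1) ? 0 : 1;
}
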
diff --git a/runtime/base/hex_dump.cc b/runtime/base/hex_dump.cc
index 936c52b47a..5423ff0b5b 100644
--- a/runtime/base/hex_dump.cc
+++ b/runtime/base/hex_dump.cc
@@ -35,7 +35,7 @@ void HexDump::Dump(std::ostream& os) const {
static const char gHexDigit[] = "0123456789abcdef";
const unsigned char* addr = reinterpret_cast<const unsigned char*>(address_);
// 01234560: 00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff 0123456789abcdef
- char out[(kBitsPerWord / 4) + /* offset */
+ char out[(kBitsPerIntPtrT / 4) + /* offset */
1 + /* colon */
(16 * 3) + /* 16 hex digits and space */
2 + /* white space */
@@ -49,7 +49,7 @@ void HexDump::Dump(std::ostream& os) const {
offset = 0;
}
memset(out, ' ', sizeof(out)-1);
- out[kBitsPerWord / 4] = ':';
+ out[kBitsPerIntPtrT / 4] = ':';
out[sizeof(out)-1] = '\0';
size_t byte_count = byte_count_;
@@ -58,11 +58,11 @@ void HexDump::Dump(std::ostream& os) const {
size_t line_offset = offset & ~0x0f;
char* hex = out;
- char* asc = out + (kBitsPerWord / 4) + /* offset */ 1 + /* colon */
+ char* asc = out + (kBitsPerIntPtrT / 4) + /* offset */ 1 + /* colon */
(16 * 3) + /* 16 hex digits and space */ 2 /* white space */;
- for (int i = 0; i < (kBitsPerWord / 4); i++) {
- *hex++ = gHexDigit[line_offset >> (kBitsPerWord - 4)];
+ for (int i = 0; i < (kBitsPerIntPtrT / 4); i++) {
+ *hex++ = gHexDigit[line_offset >> (kBitsPerIntPtrT - 4)];
line_offset <<= 4;
}
hex++;
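
The offset column is now sized from kBitsPerIntPtrT: one hex digit per four bits of pointer, so 8 digits on 32-bit and 16 on 64-bit targets. A tiny sketch of that sizing, assuming kBitsPerIntPtrT equals sizeof(intptr_t) * CHAR_BIT (the real constant lives in ART's globals, not here):

#include <climits>
#include <cstdint>
#include <cstdio>

constexpr int kBitsPerIntPtrT = sizeof(intptr_t) * CHAR_BIT;   // Assumed definition.

int main() {
  constexpr int kOffsetDigits = kBitsPerIntPtrT / 4;   // 8 or 16 hex digits plus the colon.
  std::printf("offset column uses %d hex digits\n", kOffsetDigits);
  return 0;
}
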
diff --git a/runtime/base/hex_dump_test.cc b/runtime/base/hex_dump_test.cc
index 3d782b267c..bfd5c75284 100644
--- a/runtime/base/hex_dump_test.cc
+++ b/runtime/base/hex_dump_test.cc
@@ -56,7 +56,7 @@ TEST(HexDump, ShowActualAddresses) {
std::ostringstream oss;
oss << HexDump(&g16byte_aligned_number, 8, true, "");
// Compare ignoring pointer.
- EXPECT_STREQ(oss.str().c_str() + (kBitsPerWord / 4),
+ EXPECT_STREQ(oss.str().c_str() + (kBitsPerIntPtrT / 4),
": 68 67 66 65 64 63 62 61 hgfedcba ");
}
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index b66d528d1b..f5a38bbf35 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -181,6 +181,48 @@ char (&ArraySizeHelper(T (&array)[N]))[N];
template<typename T> void UNUSED(const T&) {}
#define UNREACHABLE __builtin_unreachable
+// The FALLTHROUGH_INTENDED macro can be used to annotate implicit fall-through
+// between switch labels:
+// switch (x) {
+// case 40:
+// case 41:
+// if (truth_is_out_there) {
+// ++x;
+// FALLTHROUGH_INTENDED; // Use instead of/along with annotations in
+// // comments.
+// } else {
+// return x;
+// }
+// case 42:
+// ...
+//
+// As shown in the example above, the FALLTHROUGH_INTENDED macro should be
+// followed by a semicolon. It is designed to mimic control-flow statements
+// like 'break;', so it can be placed in most places where 'break;' can, but
+// only if there are no statements on the execution path between it and the
+// next switch label.
+//
+// When compiled with clang in C++11 mode, the FALLTHROUGH_INTENDED macro is
+// expanded to the [[clang::fallthrough]] attribute, which is analysed when
+// performing the switch-label fall-through diagnostic ('-Wimplicit-fallthrough').
+// See clang documentation on language extensions for details:
+// http://clang.llvm.org/docs/LanguageExtensions.html#clang__fallthrough
+//
+// When used with unsupported compilers, the FALLTHROUGH_INTENDED macro has no
+// effect on diagnostics.
+//
+// In either case this macro has no effect on runtime behavior and performance
+// of code.
+#if defined(__clang__) && __cplusplus >= 201103L && defined(__has_warning)
+#if __has_feature(cxx_attributes) && __has_warning("-Wimplicit-fallthrough")
+#define FALLTHROUGH_INTENDED [[clang::fallthrough]] // NOLINT
+#endif
+#endif
+
+#ifndef FALLTHROUGH_INTENDED
+#define FALLTHROUGH_INTENDED do { } while (0)
+#endif
+
// Annotalysis thread-safety analysis support.
#if defined(__SUPPORT_TS_ANNOTATION__) || defined(__clang__)
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
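
With the macro defined, intentional fall-through becomes a statement rather than a comment, so clang's -Wimplicit-fallthrough stays quiet and other compilers see an empty do/while. A small self-contained sketch mirroring the shift-accumulating switches converted elsewhere in this change; the local macro definition is the non-clang fallback, copied so the snippet compiles on its own:

#include <cstdint>

#define FALLTHROUGH_INTENDED do { } while (0)   // Non-clang fallback from base/macros.h.

// Higher case values shift more before sharing the common return.
uint64_t ShiftByCase(uint64_t imm, int cmode) {
  switch (cmode) {
    case 3: imm <<= 8; FALLTHROUGH_INTENDED;
    case 2: imm <<= 8; FALLTHROUGH_INTENDED;
    case 1: imm <<= 8; FALLTHROUGH_INTENDED;
    case 0: return imm;
    default: return 0;
  }
}

int main() { return ShiftByCase(1, 3) == (1ull << 24) ? 0 : 1; }
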
diff --git a/runtime/base/value_object.h b/runtime/base/value_object.h
new file mode 100644
index 0000000000..ee0e2a0dcc
--- /dev/null
+++ b/runtime/base/value_object.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_VALUE_OBJECT_H_
+#define ART_RUNTIME_BASE_VALUE_OBJECT_H_
+
+#include "base/logging.h"
+
+namespace art {
+
+class ValueObject {
+ public:
+ void* operator new(size_t size) {
+ LOG(FATAL) << "UNREACHABLE";
+ abort();
+ }
+ void operator delete(void*, size_t) {
+ LOG(FATAL) << "UNREACHABLE";
+ }
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_VALUE_OBJECT_H_
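
ValueObject carries no state; it only poisons operator new so subclasses must live on the stack or inside another object. A hedged sketch with a local stand-in for the class (Point is illustrative):

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Local stand-in for the ValueObject class added above: heap allocation is poisoned.
class ValueObject {
 public:
  void* operator new(std::size_t) {
    std::fprintf(stderr, "UNREACHABLE\n");
    std::abort();
  }
  void operator delete(void*, std::size_t) { std::abort(); }
};

class Point : public ValueObject {
 public:
  Point(int x, int y) : x_(x), y_(y) {}
  int x() const { return x_; }
  int y() const { return y_; }
 private:
  int x_, y_;
};

int main() {
  Point p(1, 2);                    // Fine: automatic storage only.
  // Point* q = new Point(3, 4);    // Compiles, but aborts at runtime in operator new.
  return (p.x() + p.y() == 3) ? 0 : 1;
}
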
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index bfe44a28bc..fec1824a9e 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -1128,7 +1128,7 @@ class ScopedCheck {
*errorKind = "continuation";
return utf8;
}
- // Fall through to take care of the final byte.
+ FALLTHROUGH_INTENDED; // Fall-through to take care of the final byte.
case 0x0c:
case 0x0d:
// Bit pattern 110x, so there is one additional byte.
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 6ed27bb945..cf3a581668 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2383,7 +2383,7 @@ mirror::Class* ClassLinker::DefineClass(Thread* self, const char* descriptor,
uint32_t ClassLinker::SizeOfClassWithoutEmbeddedTables(const DexFile& dex_file,
const DexFile::ClassDef& dex_class_def) {
- const byte* class_data = dex_file.GetClassData(dex_class_def);
+ const uint8_t* class_data = dex_file.GetClassData(dex_class_def);
size_t num_ref = 0;
size_t num_8 = 0;
size_t num_16 = 0;
@@ -2438,7 +2438,7 @@ OatFile::OatClass ClassLinker::FindOatClass(const DexFile& dex_file, uint16_t cl
static uint32_t GetOatMethodIndexFromMethodIndex(const DexFile& dex_file, uint16_t class_def_idx,
uint32_t method_idx) {
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_idx);
- const byte* class_data = dex_file.GetClassData(class_def);
+ const uint8_t* class_data = dex_file.GetClassData(class_def);
CHECK(class_data != nullptr);
ClassDataItemIterator it(dex_file, class_data);
// Skip fields
@@ -2644,7 +2644,7 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) {
const DexFile& dex_file = klass->GetDexFile();
const DexFile::ClassDef* dex_class_def = klass->GetClassDef();
CHECK(dex_class_def != nullptr);
- const byte* class_data = dex_file.GetClassData(*dex_class_def);
+ const uint8_t* class_data = dex_file.GetClassData(*dex_class_def);
// There should always be class data if there were direct methods.
CHECK(class_data != nullptr) << PrettyDescriptor(klass);
ClassDataItemIterator it(dex_file, class_data);
@@ -2805,7 +2805,7 @@ void ClassLinker::LoadClass(Thread* self, const DexFile& dex_file,
klass->SetDexClassDefIndex(dex_file.GetIndexForClassDef(dex_class_def));
klass->SetDexTypeIndex(dex_class_def.class_idx_);
- const byte* class_data = dex_file.GetClassData(dex_class_def);
+ const uint8_t* class_data = dex_file.GetClassData(dex_class_def);
if (class_data == nullptr) {
return; // no fields or methods - for example a marker interface
}
@@ -2825,7 +2825,7 @@ void ClassLinker::LoadClass(Thread* self, const DexFile& dex_file,
}
void ClassLinker::LoadClassMembers(Thread* self, const DexFile& dex_file,
- const byte* class_data,
+ const uint8_t* class_data,
Handle<mirror::Class> klass,
mirror::ClassLoader* class_loader,
const OatFile::OatClass* oat_class) {
@@ -3785,7 +3785,7 @@ void ClassLinker::ResolveMethodExceptionHandlerTypes(const DexFile& dex_file,
if (code_item->tries_size_ == 0) {
return; // nothing to process
}
- const byte* handlers_ptr = DexFile::GetCatchHandlerData(*code_item, 0);
+ const uint8_t* handlers_ptr = DexFile::GetCatchHandlerData(*code_item, 0);
uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
ClassLinker* linker = Runtime::Current()->GetClassLinker();
for (uint32_t idx = 0; idx < handlers_size; idx++) {
@@ -4243,7 +4243,7 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache()));
EncodedStaticFieldValueIterator value_it(dex_file, &dex_cache, &class_loader,
this, *dex_class_def);
- const byte* class_data = dex_file.GetClassData(*dex_class_def);
+ const uint8_t* class_data = dex_file.GetClassData(*dex_class_def);
ClassDataItemIterator field_it(dex_file, class_data);
if (value_it.HasNext()) {
DCHECK(field_it.HasNextStaticField());
@@ -5191,36 +5191,31 @@ bool ClassLinker::LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_
void ClassLinker::CreateReferenceInstanceOffsets(Handle<mirror::Class> klass) {
uint32_t reference_offsets = 0;
mirror::Class* super_class = klass->GetSuperClass();
+ // Leave the reference offsets as 0 for mirror::Object (the class field is handled specially).
if (super_class != nullptr) {
reference_offsets = super_class->GetReferenceInstanceOffsets();
- // If our superclass overflowed, we don't stand a chance.
- if (reference_offsets == CLASS_WALK_SUPER) {
- klass->SetReferenceInstanceOffsets(reference_offsets);
- return;
- }
- }
- CreateReferenceOffsets(klass, reference_offsets);
-}
-
-void ClassLinker::CreateReferenceOffsets(Handle<mirror::Class> klass,
- uint32_t reference_offsets) {
- size_t num_reference_fields = klass->NumReferenceInstanceFieldsDuringLinking();
- mirror::ObjectArray<mirror::ArtField>* fields = klass->GetIFields();
- // All of the fields that contain object references are guaranteed
- // to be at the beginning of the fields list.
- for (size_t i = 0; i < num_reference_fields; ++i) {
- // Note that byte_offset is the offset from the beginning of
- // object, not the offset into instance data
- mirror::ArtField* field = fields->Get(i);
- MemberOffset byte_offset = field->GetOffsetDuringLinking();
- CHECK_EQ(byte_offset.Uint32Value() & (CLASS_OFFSET_ALIGNMENT - 1), 0U);
- if (CLASS_CAN_ENCODE_OFFSET(byte_offset.Uint32Value())) {
- uint32_t new_bit = CLASS_BIT_FROM_OFFSET(byte_offset.Uint32Value());
- CHECK_NE(new_bit, 0U);
- reference_offsets |= new_bit;
- } else {
- reference_offsets = CLASS_WALK_SUPER;
- break;
+ // Compute reference offsets unless our superclass overflowed.
+ if (reference_offsets != mirror::Class::kClassWalkSuper) {
+ size_t num_reference_fields = klass->NumReferenceInstanceFieldsDuringLinking();
+ mirror::ObjectArray<mirror::ArtField>* fields = klass->GetIFields();
+ // All of the fields that contain object references are guaranteed
+ // to be at the beginning of the fields list.
+ for (size_t i = 0; i < num_reference_fields; ++i) {
+ // Note that byte_offset is the offset from the beginning of
+ // object, not the offset into instance data
+ mirror::ArtField* field = fields->Get(i);
+ MemberOffset byte_offset = field->GetOffsetDuringLinking();
+ uint32_t displaced_bitmap_position =
+ (byte_offset.Uint32Value() - mirror::kObjectHeaderSize) /
+ sizeof(mirror::HeapReference<mirror::Object>);
+ if (displaced_bitmap_position >= 32) {
+ // Can't encode offset so fall back on slow-path.
+ reference_offsets = mirror::Class::kClassWalkSuper;
+ break;
+ } else {
+ reference_offsets |= (1 << displaced_bitmap_position);
+ }
+ }
}
}
klass->SetReferenceInstanceOffsets(reference_offsets);
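
The rewritten CreateReferenceInstanceOffsets computes each bit directly from the field's byte offset: subtract the object header, divide by the size of a heap reference, and give up with kClassWalkSuper once a bit index reaches 32. A standalone sketch of that computation; the header size, reference size, and sentinel value below are illustrative placeholders, not ART's actual constants:

#include <cstdint>
#include <vector>

constexpr uint32_t kObjectHeaderSize = 8;            // Placeholder for mirror::kObjectHeaderSize.
constexpr uint32_t kHeapReferenceSize = 4;           // Placeholder for sizeof(HeapReference).
constexpr uint32_t kClassWalkSuper = 0xFFFFFFFFu;    // Placeholder sentinel: use the slow path.

uint32_t ComputeReferenceOffsets(uint32_t inherited,
                                 const std::vector<uint32_t>& ref_field_offsets) {
  uint32_t reference_offsets = inherited;
  if (reference_offsets == kClassWalkSuper) {
    return reference_offsets;                        // Superclass already overflowed.
  }
  for (uint32_t byte_offset : ref_field_offsets) {
    uint32_t bit = (byte_offset - kObjectHeaderSize) / kHeapReferenceSize;
    if (bit >= 32) {
      return kClassWalkSuper;                        // Can't encode; walk the hierarchy instead.
    }
    reference_offsets |= (1u << bit);
  }
  return reference_offsets;
}

int main() {
  // Two reference fields right after the header -> bits 0 and 1 set.
  return ComputeReferenceOffsets(0u, {8, 12}) == 0x3u ? 0 : 1;
}
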
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 111dd6369e..373fa893ea 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -461,7 +461,7 @@ class ClassLinker {
void LoadClass(Thread* self, const DexFile& dex_file, const DexFile::ClassDef& dex_class_def,
Handle<mirror::Class> klass, mirror::ClassLoader* class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void LoadClassMembers(Thread* self, const DexFile& dex_file, const byte* class_data,
+ void LoadClassMembers(Thread* self, const DexFile& dex_file, const uint8_t* class_data,
Handle<mirror::Class> klass, mirror::ClassLoader* class_loader,
const OatFile::OatClass* oat_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -539,8 +539,6 @@ class ClassLinker {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CreateReferenceInstanceOffsets(Handle<mirror::Class> klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CreateReferenceOffsets(Handle<mirror::Class> klass, uint32_t reference_offsets)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// For use by ImageWriter to find DexCaches for its roots
ReaderWriterMutex* DexLock()
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 613ac66843..e990181aa8 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -47,9 +47,9 @@ class ClassLinkerTest : public CommonRuntimeTest {
void AssertNonExistentClass(const std::string& descriptor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
- EXPECT_TRUE(class_linker_->FindSystemClass(self, descriptor.c_str()) == NULL);
+ EXPECT_TRUE(class_linker_->FindSystemClass(self, descriptor.c_str()) == nullptr);
EXPECT_TRUE(self->IsExceptionPending());
- mirror::Object* exception = self->GetException(NULL);
+ mirror::Object* exception = self->GetException(nullptr);
self->ClearException();
mirror::Class* exception_class =
class_linker_->FindSystemClass(self, "Ljava/lang/NoClassDefFoundError;");
@@ -64,15 +64,15 @@ class ClassLinkerTest : public CommonRuntimeTest {
void AssertPrimitiveClass(const std::string& descriptor, mirror::Class* primitive)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ASSERT_TRUE(primitive != NULL);
- ASSERT_TRUE(primitive->GetClass() != NULL);
+ ASSERT_TRUE(primitive != nullptr);
+ ASSERT_TRUE(primitive->GetClass() != nullptr);
ASSERT_EQ(primitive->GetClass(), primitive->GetClass()->GetClass());
- EXPECT_TRUE(primitive->GetClass()->GetSuperClass() != NULL);
+ EXPECT_TRUE(primitive->GetClass()->GetSuperClass() != nullptr);
std::string temp;
ASSERT_STREQ(descriptor.c_str(), primitive->GetDescriptor(&temp));
- EXPECT_TRUE(primitive->GetSuperClass() == NULL);
+ EXPECT_TRUE(primitive->GetSuperClass() == nullptr);
EXPECT_FALSE(primitive->HasSuperClass());
- EXPECT_TRUE(primitive->GetClassLoader() == NULL);
+ EXPECT_TRUE(primitive->GetClassLoader() == nullptr);
EXPECT_EQ(mirror::Class::kStatusInitialized, primitive->GetStatus());
EXPECT_FALSE(primitive->IsErroneous());
EXPECT_TRUE(primitive->IsLoaded());
@@ -81,7 +81,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
EXPECT_TRUE(primitive->IsInitialized());
EXPECT_FALSE(primitive->IsArrayInstance());
EXPECT_FALSE(primitive->IsArrayClass());
- EXPECT_TRUE(primitive->GetComponentType() == NULL);
+ EXPECT_TRUE(primitive->GetComponentType() == nullptr);
EXPECT_FALSE(primitive->IsInterface());
EXPECT_TRUE(primitive->IsPublic());
EXPECT_TRUE(primitive->IsFinal());
@@ -94,7 +94,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
EXPECT_EQ(0U, primitive->NumDirectInterfaces());
EXPECT_FALSE(primitive->HasVTable());
EXPECT_EQ(0, primitive->GetIfTableCount());
- EXPECT_TRUE(primitive->GetIfTable() == NULL);
+ EXPECT_TRUE(primitive->GetIfTable() == nullptr);
EXPECT_EQ(kAccPublic | kAccFinal | kAccAbstract, primitive->GetAccessFlags());
}
@@ -116,17 +116,17 @@ class ClassLinkerTest : public CommonRuntimeTest {
void AssertArrayClass(const std::string& array_descriptor, Handle<mirror::Class> array)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ASSERT_TRUE(array.Get() != NULL);
- ASSERT_TRUE(array->GetClass() != NULL);
+ ASSERT_TRUE(array.Get() != nullptr);
+ ASSERT_TRUE(array->GetClass() != nullptr);
ASSERT_EQ(array->GetClass(), array->GetClass()->GetClass());
- EXPECT_TRUE(array->GetClass()->GetSuperClass() != NULL);
+ EXPECT_TRUE(array->GetClass()->GetSuperClass() != nullptr);
std::string temp;
ASSERT_STREQ(array_descriptor.c_str(), array->GetDescriptor(&temp));
- EXPECT_TRUE(array->GetSuperClass() != NULL);
+ EXPECT_TRUE(array->GetSuperClass() != nullptr);
Thread* self = Thread::Current();
EXPECT_EQ(class_linker_->FindSystemClass(self, "Ljava/lang/Object;"), array->GetSuperClass());
EXPECT_TRUE(array->HasSuperClass());
- ASSERT_TRUE(array->GetComponentType() != NULL);
+ ASSERT_TRUE(array->GetComponentType() != nullptr);
ASSERT_GT(strlen(array->GetComponentType()->GetDescriptor(&temp)), 0U);
EXPECT_EQ(mirror::Class::kStatusInitialized, array->GetStatus());
EXPECT_FALSE(array->IsErroneous());
@@ -148,7 +148,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
EXPECT_EQ(2U, array->NumDirectInterfaces());
EXPECT_TRUE(array->ShouldHaveEmbeddedImtAndVTable());
EXPECT_EQ(2, array->GetIfTableCount());
- ASSERT_TRUE(array->GetIfTable() != NULL);
+ ASSERT_TRUE(array->GetIfTable() != nullptr);
mirror::Class* direct_interface0 = mirror::Class::GetDirectInterface(self, array, 0);
EXPECT_TRUE(direct_interface0 != nullptr);
EXPECT_STREQ(direct_interface0->GetDescriptor(&temp), "Ljava/lang/Cloneable;");
@@ -177,13 +177,13 @@ class ClassLinkerTest : public CommonRuntimeTest {
void AssertField(mirror::Class* klass, mirror::ArtField* field)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- EXPECT_TRUE(field != NULL);
- EXPECT_TRUE(field->GetClass() != NULL);
+ EXPECT_TRUE(field != nullptr);
+ EXPECT_TRUE(field->GetClass() != nullptr);
EXPECT_EQ(klass, field->GetDeclaringClass());
- EXPECT_TRUE(field->GetName() != NULL);
+ EXPECT_TRUE(field->GetName() != nullptr);
StackHandleScope<1> hs(Thread::Current());
FieldHelper fh(hs.NewHandle(field));
- EXPECT_TRUE(fh.GetType() != NULL);
+ EXPECT_TRUE(fh.GetType() != nullptr);
}
void AssertClass(const std::string& descriptor, Handle<mirror::Class> klass)
@@ -194,16 +194,16 @@ class ClassLinkerTest : public CommonRuntimeTest {
EXPECT_FALSE(klass->HasSuperClass());
} else {
EXPECT_TRUE(klass->HasSuperClass());
- EXPECT_TRUE(klass->GetSuperClass() != NULL);
+ EXPECT_TRUE(klass->GetSuperClass() != nullptr);
}
- EXPECT_TRUE(klass->GetClass() != NULL);
+ EXPECT_TRUE(klass->GetClass() != nullptr);
EXPECT_EQ(klass->GetClass(), klass->GetClass()->GetClass());
- EXPECT_TRUE(klass->GetDexCache() != NULL);
+ EXPECT_TRUE(klass->GetDexCache() != nullptr);
EXPECT_TRUE(klass->IsLoaded());
EXPECT_TRUE(klass->IsResolved());
EXPECT_FALSE(klass->IsErroneous());
EXPECT_FALSE(klass->IsArrayClass());
- EXPECT_TRUE(klass->GetComponentType() == NULL);
+ EXPECT_TRUE(klass->GetComponentType() == nullptr);
EXPECT_TRUE(klass->IsInSamePackage(klass.Get()));
std::string temp2;
EXPECT_TRUE(mirror::Class::IsInSamePackage(klass->GetDescriptor(&temp),
@@ -225,7 +225,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
mirror::IfTable* iftable = klass->GetIfTable();
for (int i = 0; i < klass->GetIfTableCount(); i++) {
mirror::Class* interface = iftable->GetInterface(i);
- ASSERT_TRUE(interface != NULL);
+ ASSERT_TRUE(interface != nullptr);
if (klass->IsInterface()) {
EXPECT_EQ(0U, iftable->GetMethodArrayCount(i));
} else {
@@ -285,7 +285,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
FieldHelper fh(fhandle);
ASSERT_TRUE(!field->IsPrimitiveType());
mirror::Class* field_type = fh.GetType();
- ASSERT_TRUE(field_type != NULL);
+ ASSERT_TRUE(field_type != nullptr);
ASSERT_TRUE(!field_type->IsPrimitive());
}
for (size_t i = klass->NumReferenceInstanceFields(); i < klass->NumInstanceFields(); i++) {
@@ -293,7 +293,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
fhandle.Assign(field);
FieldHelper fh(fhandle);
mirror::Class* field_type = fh.GetType();
- ASSERT_TRUE(field_type != NULL);
+ ASSERT_TRUE(field_type != nullptr);
if (!fh.GetField()->IsPrimitiveType() || !field_type->IsPrimitive()) {
// While Reference.referent is not primitive, the ClassLinker
// treats it as such so that the garbage collector won't scan it.
@@ -301,18 +301,24 @@ class ClassLinkerTest : public CommonRuntimeTest {
}
}
- size_t total_num_reference_instance_fields = 0;
+ uint32_t total_num_reference_instance_fields = 0;
mirror::Class* k = klass.Get();
- while (k != NULL) {
+ while (k != nullptr) {
total_num_reference_instance_fields += k->NumReferenceInstanceFields();
k = k->GetSuperClass();
}
- EXPECT_EQ(klass->GetReferenceInstanceOffsets() == 0, total_num_reference_instance_fields == 0);
+ EXPECT_GE(total_num_reference_instance_fields, 1U); // Should always have Object's class.
+ if (klass->GetReferenceInstanceOffsets() != mirror::Class::kClassWalkSuper) {
+ // The reference instance offsets have a bit set for each reference offset.
+ // +1 for Object's class.
+ EXPECT_EQ(static_cast<uint32_t>(POPCOUNT(klass->GetReferenceInstanceOffsets())) + 1,
+ total_num_reference_instance_fields);
+ }
}
void AssertDexFileClass(mirror::ClassLoader* class_loader, const std::string& descriptor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ASSERT_TRUE(descriptor != NULL);
+ ASSERT_TRUE(descriptor != nullptr);
Thread* self = Thread::Current();
StackHandleScope<1> hs(self);
Handle<mirror::Class> klass(
@@ -332,7 +338,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
void AssertDexFile(const DexFile* dex, mirror::ClassLoader* class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ASSERT_TRUE(dex != NULL);
+ ASSERT_TRUE(dex != nullptr);
// Verify all the classes defined in this file
for (size_t i = 0; i < dex->NumClassDefs(); i++) {
@@ -346,17 +352,17 @@ class ClassLinkerTest : public CommonRuntimeTest {
const char* descriptor = dex->GetTypeDescriptor(type_id);
AssertDexFileClass(class_loader, descriptor);
}
- class_linker_->VisitRoots(TestRootVisitor, NULL, kVisitRootFlagAllRoots);
+ class_linker_->VisitRoots(TestRootVisitor, nullptr, kVisitRootFlagAllRoots);
// Verify the dex cache has resolution methods in all resolved method slots
mirror::DexCache* dex_cache = class_linker_->FindDexCache(*dex);
mirror::ObjectArray<mirror::ArtMethod>* resolved_methods = dex_cache->GetResolvedMethods();
for (size_t i = 0; i < static_cast<size_t>(resolved_methods->GetLength()); i++) {
- EXPECT_TRUE(resolved_methods->Get(i) != NULL) << dex->GetLocation() << " i=" << i;
+ EXPECT_TRUE(resolved_methods->Get(i) != nullptr) << dex->GetLocation() << " i=" << i;
}
}
static void TestRootVisitor(mirror::Object** root, void*, uint32_t, RootType) {
- EXPECT_TRUE(*root != NULL);
+ EXPECT_TRUE(*root != nullptr);
}
};
@@ -378,7 +384,7 @@ struct CheckOffsets {
Thread* self = Thread::Current();
mirror::Class* klass =
Runtime::Current()->GetClassLinker()->FindSystemClass(self, class_descriptor.c_str());
- CHECK(klass != NULL) << class_descriptor;
+ CHECK(klass != nullptr) << class_descriptor;
bool error = false;
@@ -647,12 +653,12 @@ TEST_F(ClassLinkerTest, FindClassNested) {
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("Nested"))));
mirror::Class* outer = class_linker_->FindClass(soa.Self(), "LNested;", class_loader);
- ASSERT_TRUE(outer != NULL);
+ ASSERT_TRUE(outer != nullptr);
EXPECT_EQ(0U, outer->NumVirtualMethods());
EXPECT_EQ(1U, outer->NumDirectMethods());
mirror::Class* inner = class_linker_->FindClass(soa.Self(), "LNested$Inner;", class_loader);
- ASSERT_TRUE(inner != NULL);
+ ASSERT_TRUE(inner != nullptr);
EXPECT_EQ(0U, inner->NumVirtualMethods());
EXPECT_EQ(1U, inner->NumDirectMethods());
}
@@ -674,15 +680,15 @@ TEST_F(ClassLinkerTest, FindClass_Primitives) {
TEST_F(ClassLinkerTest, FindClass) {
ScopedObjectAccess soa(Thread::Current());
mirror::Class* JavaLangObject = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
- ASSERT_TRUE(JavaLangObject != NULL);
- ASSERT_TRUE(JavaLangObject->GetClass() != NULL);
+ ASSERT_TRUE(JavaLangObject != nullptr);
+ ASSERT_TRUE(JavaLangObject->GetClass() != nullptr);
ASSERT_EQ(JavaLangObject->GetClass(), JavaLangObject->GetClass()->GetClass());
EXPECT_EQ(JavaLangObject, JavaLangObject->GetClass()->GetSuperClass());
std::string temp;
ASSERT_STREQ(JavaLangObject->GetDescriptor(&temp), "Ljava/lang/Object;");
- EXPECT_TRUE(JavaLangObject->GetSuperClass() == NULL);
+ EXPECT_TRUE(JavaLangObject->GetSuperClass() == nullptr);
EXPECT_FALSE(JavaLangObject->HasSuperClass());
- EXPECT_TRUE(JavaLangObject->GetClassLoader() == NULL);
+ EXPECT_TRUE(JavaLangObject->GetClassLoader() == nullptr);
EXPECT_EQ(mirror::Class::kStatusInitialized, JavaLangObject->GetStatus());
EXPECT_FALSE(JavaLangObject->IsErroneous());
EXPECT_TRUE(JavaLangObject->IsLoaded());
@@ -691,7 +697,7 @@ TEST_F(ClassLinkerTest, FindClass) {
EXPECT_TRUE(JavaLangObject->IsInitialized());
EXPECT_FALSE(JavaLangObject->IsArrayInstance());
EXPECT_FALSE(JavaLangObject->IsArrayClass());
- EXPECT_TRUE(JavaLangObject->GetComponentType() == NULL);
+ EXPECT_TRUE(JavaLangObject->GetComponentType() == nullptr);
EXPECT_FALSE(JavaLangObject->IsInterface());
EXPECT_TRUE(JavaLangObject->IsPublic());
EXPECT_FALSE(JavaLangObject->IsFinal());
@@ -719,8 +725,8 @@ TEST_F(ClassLinkerTest, FindClass) {
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("MyClass"))));
AssertNonExistentClass("LMyClass;");
mirror::Class* MyClass = class_linker_->FindClass(soa.Self(), "LMyClass;", class_loader);
- ASSERT_TRUE(MyClass != NULL);
- ASSERT_TRUE(MyClass->GetClass() != NULL);
+ ASSERT_TRUE(MyClass != nullptr);
+ ASSERT_TRUE(MyClass->GetClass() != nullptr);
ASSERT_EQ(MyClass->GetClass(), MyClass->GetClass()->GetClass());
EXPECT_EQ(JavaLangObject, MyClass->GetClass()->GetSuperClass());
ASSERT_STREQ(MyClass->GetDescriptor(&temp), "LMyClass;");
@@ -735,7 +741,7 @@ TEST_F(ClassLinkerTest, FindClass) {
EXPECT_FALSE(MyClass->IsInitialized());
EXPECT_FALSE(MyClass->IsArrayInstance());
EXPECT_FALSE(MyClass->IsArrayClass());
- EXPECT_TRUE(MyClass->GetComponentType() == NULL);
+ EXPECT_TRUE(MyClass->GetComponentType() == nullptr);
EXPECT_FALSE(MyClass->IsInterface());
EXPECT_FALSE(MyClass->IsPublic());
EXPECT_FALSE(MyClass->IsFinal());
@@ -750,10 +756,10 @@ TEST_F(ClassLinkerTest, FindClass) {
EXPECT_EQ(JavaLangObject->GetClass()->GetClass(), MyClass->GetClass()->GetClass());
// created by class_linker
- AssertArrayClass("[C", "C", NULL);
- AssertArrayClass("[Ljava/lang/Object;", "Ljava/lang/Object;", NULL);
+ AssertArrayClass("[C", "C", nullptr);
+ AssertArrayClass("[Ljava/lang/Object;", "Ljava/lang/Object;", nullptr);
// synthesized on the fly
- AssertArrayClass("[[C", "[C", NULL);
+ AssertArrayClass("[[C", "[C", nullptr);
AssertArrayClass("[[[LMyClass;", "[[LMyClass;", class_loader.Get());
// or not available at all
AssertNonExistentClass("[[[[LNonExistentClass;");
@@ -761,7 +767,7 @@ TEST_F(ClassLinkerTest, FindClass) {
TEST_F(ClassLinkerTest, LibCore) {
ScopedObjectAccess soa(Thread::Current());
- AssertDexFile(java_lang_dex_file_, NULL);
+ AssertDexFile(java_lang_dex_file_, nullptr);
}
// The first reference array element must be a multiple of 4 bytes from the
@@ -863,7 +869,7 @@ TEST_F(ClassLinkerTest, StaticFields) {
// expression resolve to a copy of a constant value from the constant pool.
// So <clinit> should be null.
mirror::ArtMethod* clinit = statics->FindDirectMethod("<clinit>", "()V");
- EXPECT_TRUE(clinit == NULL);
+ EXPECT_TRUE(clinit == nullptr);
EXPECT_EQ(9U, statics->NumStaticFields());
@@ -960,15 +966,15 @@ TEST_F(ClassLinkerTest, Interfaces) {
mirror::ArtMethod* Ai = A->FindVirtualMethod("i", void_sig);
mirror::ArtMethod* Aj1 = A->FindVirtualMethod("j1", void_sig);
mirror::ArtMethod* Aj2 = A->FindVirtualMethod("j2", void_sig);
- ASSERT_TRUE(Ii != NULL);
- ASSERT_TRUE(Jj1 != NULL);
- ASSERT_TRUE(Jj2 != NULL);
- ASSERT_TRUE(Kj1 != NULL);
- ASSERT_TRUE(Kj2 != NULL);
- ASSERT_TRUE(Kk != NULL);
- ASSERT_TRUE(Ai != NULL);
- ASSERT_TRUE(Aj1 != NULL);
- ASSERT_TRUE(Aj2 != NULL);
+ ASSERT_TRUE(Ii != nullptr);
+ ASSERT_TRUE(Jj1 != nullptr);
+ ASSERT_TRUE(Jj2 != nullptr);
+ ASSERT_TRUE(Kj1 != nullptr);
+ ASSERT_TRUE(Kj2 != nullptr);
+ ASSERT_TRUE(Kk != nullptr);
+ ASSERT_TRUE(Ai != nullptr);
+ ASSERT_TRUE(Aj1 != nullptr);
+ ASSERT_TRUE(Aj2 != nullptr);
EXPECT_NE(Ii, Ai);
EXPECT_NE(Jj1, Aj1);
EXPECT_NE(Jj2, Aj2);
@@ -989,7 +995,7 @@ TEST_F(ClassLinkerTest, Interfaces) {
"Ljava/lang/String;");
mirror::ArtField* Kfoo = mirror::Class::FindStaticField(soa.Self(), K, "foo",
"Ljava/lang/String;");
- ASSERT_TRUE(Afoo != NULL);
+ ASSERT_TRUE(Afoo != nullptr);
EXPECT_EQ(Afoo, Bfoo);
EXPECT_EQ(Afoo, Jfoo);
EXPECT_EQ(Afoo, Kfoo);
@@ -1007,20 +1013,20 @@ TEST_F(ClassLinkerTest, ResolveVerifyAndClinit) {
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
const DexFile* dex_file = Runtime::Current()->GetCompileTimeClassPath(jclass_loader)[0];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LStaticsFromCode;", class_loader);
mirror::ArtMethod* clinit = klass->FindClassInitializer();
mirror::ArtMethod* getS0 = klass->FindDirectMethod("getS0", "()Ljava/lang/Object;");
const DexFile::StringId* string_id = dex_file->FindStringId("LStaticsFromCode;");
- ASSERT_TRUE(string_id != NULL);
+ ASSERT_TRUE(string_id != nullptr);
const DexFile::TypeId* type_id = dex_file->FindTypeId(dex_file->GetIndexForStringId(*string_id));
- ASSERT_TRUE(type_id != NULL);
+ ASSERT_TRUE(type_id != nullptr);
uint32_t type_idx = dex_file->GetIndexForTypeId(*type_id);
mirror::Class* uninit = ResolveVerifyAndClinit(type_idx, clinit, Thread::Current(), true, false);
- EXPECT_TRUE(uninit != NULL);
+ EXPECT_TRUE(uninit != nullptr);
EXPECT_FALSE(uninit->IsInitialized());
mirror::Class* init = ResolveVerifyAndClinit(type_idx, getS0, Thread::Current(), true, false);
- EXPECT_TRUE(init != NULL);
+ EXPECT_TRUE(init != nullptr);
EXPECT_TRUE(init->IsInitialized());
}
@@ -1117,7 +1123,7 @@ TEST_F(ClassLinkerTest, Preverified_InitializedBoot) {
ScopedObjectAccess soa(Thread::Current());
mirror::Class* JavaLangObject = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
- ASSERT_TRUE(JavaLangObject != NULL);
+ ASSERT_TRUE(JavaLangObject != nullptr);
EXPECT_TRUE(JavaLangObject->IsInitialized()) << "Not testing already initialized class from the "
"core";
CheckPreverified(JavaLangObject, true);
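
A clarification for the reference-offset hunk at the top of this file's changes: GetReferenceInstanceOffsets() returns a per-class bitmap with one bit set for each instance field that holds a reference, unless the class overflowed that encoding and fell back to kClassWalkSuper. The test therefore compares the bitmap's population count, plus one as the diff's own comment notes, against the total it accumulated while walking the class. A minimal sketch of that counting, assuming ART's POPCOUNT is just the compiler builtin:

// Illustrative only: what the POPCOUNT-based assertion is counting.
// Assumes POPCOUNT(x) behaves like __builtin_popcount on a 32-bit bitmap.
#include <cstdint>

uint32_t CountBitmapReferences(uint32_t reference_instance_offsets) {
  // One bit per instance field holding a reference; the "+1" in the
  // EXPECT_EQ above accounts for Object's class, per the test comment.
  return static_cast<uint32_t>(__builtin_popcount(reference_instance_offsets));
}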
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 96b44bfdf7..c53f6b2ea0 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -3504,6 +3504,7 @@ static char JdwpTagToShortyChar(JDWP::JdwpTag tag) {
switch (tag) {
default:
LOG(FATAL) << "unknown JDWP tag: " << PrintableChar(tag);
+ UNREACHABLE();
// Primitives.
case JDWP::JT_BYTE: return 'B';
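
In the debugger.cc hunk, the new UNREACHABLE() after LOG(FATAL) tells the compiler, not just the reader, that execution never falls through into the case labels for an unknown tag; LOG(FATAL) aborts, but without the hint the compiler can still warn about an implicit fall-through or a missing return. A hedged sketch of the usual definition (the real one lives in runtime/base/macros.h):

// Sketch only: UNREACHABLE() typically expands to the compiler builtin.
#define UNREACHABLE() __builtin_unreachable()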
diff --git a/runtime/dex_file-inl.h b/runtime/dex_file-inl.h
index e095c4859c..c68fdca03e 100644
--- a/runtime/dex_file-inl.h
+++ b/runtime/dex_file-inl.h
@@ -26,14 +26,14 @@
namespace art {
inline int32_t DexFile::GetStringLength(const StringId& string_id) const {
- const byte* ptr = begin_ + string_id.string_data_off_;
+ const uint8_t* ptr = begin_ + string_id.string_data_off_;
return DecodeUnsignedLeb128(&ptr);
}
inline const char* DexFile::GetStringDataAndUtf16Length(const StringId& string_id,
uint32_t* utf16_length) const {
DCHECK(utf16_length != NULL) << GetLocation();
- const byte* ptr = begin_ + string_id.string_data_off_;
+ const uint8_t* ptr = begin_ + string_id.string_data_off_;
*utf16_length = DecodeUnsignedLeb128(&ptr);
return reinterpret_cast<const char*>(ptr);
}
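
Both accessors above lean on DecodeUnsignedLeb128 advancing a cursor over a ULEB128-encoded length, the variable-length integer encoding used throughout the dex format. A minimal sketch of such a decoder, not ART's actual implementation in leb128.h:

// Sketch of ULEB128 decoding: 7 data bits per byte, MSB set means "more bytes follow".
#include <cstdint>

static uint32_t DecodeUleb128(const uint8_t** data) {
  const uint8_t* ptr = *data;
  uint32_t result = 0;
  int shift = 0;
  uint8_t byte;
  do {
    byte = *ptr++;
    result |= static_cast<uint32_t>(byte & 0x7f) << shift;
    shift += 7;
  } while ((byte & 0x80) != 0);  // continuation bit
  *data = ptr;  // advance the caller's cursor, as DecodeUnsignedLeb128 does
  return result;
}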
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 6ef62c5cd6..f408386617 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -47,8 +47,8 @@
namespace art {
-const byte DexFile::kDexMagic[] = { 'd', 'e', 'x', '\n' };
-const byte DexFile::kDexMagicVersion[] = { '0', '3', '5', '\0' };
+const uint8_t DexFile::kDexMagic[] = { 'd', 'e', 'x', '\n' };
+const uint8_t DexFile::kDexMagicVersion[] = { '0', '3', '5', '\0' };
static int OpenAndReadMagic(const char* filename, uint32_t* magic, std::string* error_msg) {
CHECK(magic != NULL);
@@ -323,7 +323,7 @@ bool DexFile::OpenFromZip(const ZipArchive& zip_archive, const std::string& loca
}
-const DexFile* DexFile::OpenMemory(const byte* base,
+const DexFile* DexFile::OpenMemory(const uint8_t* base,
size_t size,
const std::string& location,
uint32_t location_checksum,
@@ -337,7 +337,7 @@ const DexFile* DexFile::OpenMemory(const byte* base,
}
}
-DexFile::DexFile(const byte* base, size_t size,
+DexFile::DexFile(const uint8_t* base, size_t size,
const std::string& location,
uint32_t location_checksum,
MemMap* mem_map)
@@ -399,12 +399,12 @@ bool DexFile::CheckMagicAndVersion(std::string* error_msg) const {
return true;
}
-bool DexFile::IsMagicValid(const byte* magic) {
+bool DexFile::IsMagicValid(const uint8_t* magic) {
return (memcmp(magic, kDexMagic, sizeof(kDexMagic)) == 0);
}
-bool DexFile::IsVersionValid(const byte* magic) {
- const byte* version = &magic[sizeof(kDexMagic)];
+bool DexFile::IsVersionValid(const uint8_t* magic) {
+ const uint8_t* version = &magic[sizeof(kDexMagic)];
return (memcmp(version, kDexMagicVersion, sizeof(kDexMagicVersion)) == 0);
}
@@ -754,7 +754,7 @@ int32_t DexFile::FindCatchHandlerOffset(const CodeItem &code_item, uint32_t addr
void DexFile::DecodeDebugInfo0(const CodeItem* code_item, bool is_static, uint32_t method_idx,
DexDebugNewPositionCb position_cb, DexDebugNewLocalCb local_cb,
- void* context, const byte* stream, LocalInfo* local_in_reg) const {
+ void* context, const uint8_t* stream, LocalInfo* local_in_reg) const {
uint32_t line = DecodeUnsignedLeb128(&stream);
uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
uint16_t arg_reg = code_item->registers_size_ - code_item->ins_size_;
@@ -919,7 +919,7 @@ void DexFile::DecodeDebugInfo(const CodeItem* code_item, bool is_static, uint32_
DexDebugNewPositionCb position_cb, DexDebugNewLocalCb local_cb,
void* context) const {
DCHECK(code_item != nullptr);
- const byte* stream = GetDebugInfoStream(code_item);
+ const uint8_t* stream = GetDebugInfoStream(code_item);
std::unique_ptr<LocalInfo[]> local_in_reg(local_cb != NULL ?
new LocalInfo[code_item->registers_size_] :
NULL);
@@ -1059,7 +1059,7 @@ void ClassDataItemIterator::ReadClassDataMethod() {
}
// Read a signed integer. "zwidth" is the zero-based byte count.
-static int32_t ReadSignedInt(const byte* ptr, int zwidth) {
+static int32_t ReadSignedInt(const uint8_t* ptr, int zwidth) {
int32_t val = 0;
for (int i = zwidth; i >= 0; --i) {
val = ((uint32_t)val >> 8) | (((int32_t)*ptr++) << 24);
@@ -1070,7 +1070,7 @@ static int32_t ReadSignedInt(const byte* ptr, int zwidth) {
// Read an unsigned integer. "zwidth" is the zero-based byte count,
// "fill_on_right" indicates which side we want to zero-fill from.
-static uint32_t ReadUnsignedInt(const byte* ptr, int zwidth, bool fill_on_right) {
+static uint32_t ReadUnsignedInt(const uint8_t* ptr, int zwidth, bool fill_on_right) {
uint32_t val = 0;
if (!fill_on_right) {
for (int i = zwidth; i >= 0; --i) {
@@ -1086,7 +1086,7 @@ static uint32_t ReadUnsignedInt(const byte* ptr, int zwidth, bool fill_on_right)
}
// Read a signed long. "zwidth" is the zero-based byte count.
-static int64_t ReadSignedLong(const byte* ptr, int zwidth) {
+static int64_t ReadSignedLong(const uint8_t* ptr, int zwidth) {
int64_t val = 0;
for (int i = zwidth; i >= 0; --i) {
val = ((uint64_t)val >> 8) | (((int64_t)*ptr++) << 56);
@@ -1097,7 +1097,7 @@ static int64_t ReadSignedLong(const byte* ptr, int zwidth) {
// Read an unsigned long. "zwidth" is the zero-based byte count,
// "fill_on_right" indicates which side we want to zero-fill from.
-static uint64_t ReadUnsignedLong(const byte* ptr, int zwidth, bool fill_on_right) {
+static uint64_t ReadUnsignedLong(const uint8_t* ptr, int zwidth, bool fill_on_right) {
uint64_t val = 0;
if (!fill_on_right) {
for (int i = zwidth; i >= 0; --i) {
@@ -1137,8 +1137,8 @@ void EncodedStaticFieldValueIterator::Next() {
if (pos_ >= array_size_) {
return;
}
- byte value_type = *ptr_++;
- byte value_arg = value_type >> kEncodedValueArgShift;
+ uint8_t value_type = *ptr_++;
+ uint8_t value_arg = value_type >> kEncodedValueArgShift;
size_t width = value_arg + 1; // assume and correct later
type_ = static_cast<ValueType>(value_type & kEncodedValueTypeMask);
switch (type_) {
@@ -1266,7 +1266,7 @@ void CatchHandlerIterator::Init(const DexFile::CodeItem& code_item,
}
}
-void CatchHandlerIterator::Init(const byte* handler_data) {
+void CatchHandlerIterator::Init(const uint8_t* handler_data) {
current_data_ = handler_data;
remaining_count_ = DecodeSignedLeb128(&current_data_);
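
The EncodedStaticFieldValueIterator hunk above also shows how a dex encoded_value header byte is unpacked: the low five bits select the value type and the high three bits carry value_arg, so for most types (value_arg + 1) payload bytes follow. A small illustration using the same mask and shift as the constants declared in dex_file.h:

// Illustrative unpacking of an encoded_value header byte (dex format).
#include <cstdint>
#include <cstdio>

constexpr uint8_t kEncodedValueTypeMask = 0x1f;  // low 5 bits: value type
constexpr uint8_t kEncodedValueArgShift = 5;     // high 3 bits: value_arg

void DumpEncodedValueHeader(uint8_t header) {
  unsigned type = header & kEncodedValueTypeMask;
  unsigned value_arg = header >> kEncodedValueArgShift;
  // For integral types the payload is (value_arg + 1) little-endian bytes.
  printf("type=0x%02x payload_bytes=%u\n", type, value_arg + 1u);
}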
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index c160253019..620bd6eeab 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -50,10 +50,10 @@ class ZipArchive;
// TODO: move all of the macro functionality into the DexCache class.
class DexFile {
public:
- static const byte kDexMagic[];
- static const byte kDexMagicVersion[];
- static const size_t kSha1DigestSize = 20;
- static const uint32_t kDexEndianConstant = 0x12345678;
+ static const uint8_t kDexMagic[];
+ static const uint8_t kDexMagicVersion[];
+ static constexpr size_t kSha1DigestSize = 20;
+ static constexpr uint32_t kDexEndianConstant = 0x12345678;
// name of the DexFile entry within a zip archive
static const char* kClassesDex;
@@ -440,10 +440,10 @@ class DexFile {
uint32_t GetVersion() const;
// Returns true if the byte string points to the magic value.
- static bool IsMagicValid(const byte* magic);
+ static bool IsMagicValid(const uint8_t* magic);
// Returns true if the byte string after the magic is the correct value.
- static bool IsVersionValid(const byte* magic);
+ static bool IsVersionValid(const uint8_t* magic);
// Returns the number of string identifiers in the .dex file.
size_t NumStringIds() const {
@@ -658,13 +658,13 @@ class DexFile {
if (class_def.interfaces_off_ == 0) {
return NULL;
} else {
- const byte* addr = begin_ + class_def.interfaces_off_;
+ const uint8_t* addr = begin_ + class_def.interfaces_off_;
return reinterpret_cast<const TypeList*>(addr);
}
}
// Returns a pointer to the raw memory mapped class_data_item
- const byte* GetClassData(const ClassDef& class_def) const {
+ const uint8_t* GetClassData(const ClassDef& class_def) const {
if (class_def.class_data_off_ == 0) {
return NULL;
} else {
@@ -677,7 +677,7 @@ class DexFile {
if (code_off == 0) {
return NULL; // native or abstract method
} else {
- const byte* addr = begin_ + code_off;
+ const uint8_t* addr = begin_ + code_off;
return reinterpret_cast<const CodeItem*>(addr);
}
}
@@ -730,12 +730,12 @@ class DexFile {
if (proto_id.parameters_off_ == 0) {
return NULL;
} else {
- const byte* addr = begin_ + proto_id.parameters_off_;
+ const uint8_t* addr = begin_ + proto_id.parameters_off_;
return reinterpret_cast<const TypeList*>(addr);
}
}
- const byte* GetEncodedStaticFieldValuesArray(const ClassDef& class_def) const {
+ const uint8_t* GetEncodedStaticFieldValuesArray(const ClassDef& class_def) const {
if (class_def.static_values_off_ == 0) {
return 0;
} else {
@@ -746,9 +746,9 @@ class DexFile {
static const TryItem* GetTryItems(const CodeItem& code_item, uint32_t offset);
// Get the base of the encoded data for the given DexCode.
- static const byte* GetCatchHandlerData(const CodeItem& code_item, uint32_t offset) {
- const byte* handler_data =
- reinterpret_cast<const byte*>(GetTryItems(code_item, code_item.tries_size_));
+ static const uint8_t* GetCatchHandlerData(const CodeItem& code_item, uint32_t offset) {
+ const uint8_t* handler_data =
+ reinterpret_cast<const uint8_t*>(GetTryItems(code_item, code_item.tries_size_));
return handler_data + offset;
}
@@ -759,7 +759,7 @@ class DexFile {
static int32_t FindCatchHandlerOffset(const CodeItem &code_item, uint32_t address);
// Get the pointer to the start of the debugging data
- const byte* GetDebugInfoStream(const CodeItem* code_item) const {
+ const uint8_t* GetDebugInfoStream(const CodeItem* code_item) const {
if (code_item->debug_info_off_ == 0) {
return NULL;
} else {
@@ -862,7 +862,7 @@ class DexFile {
bool DisableWrite() const;
- const byte* Begin() const {
+ const uint8_t* Begin() const {
return begin_;
}
@@ -917,14 +917,14 @@ class DexFile {
std::string* error_msg);
// Opens a .dex file at the given address, optionally backed by a MemMap
- static const DexFile* OpenMemory(const byte* dex_file,
+ static const DexFile* OpenMemory(const uint8_t* dex_file,
size_t size,
const std::string& location,
uint32_t location_checksum,
MemMap* mem_map,
std::string* error_msg);
- DexFile(const byte* base, size_t size,
+ DexFile(const uint8_t* base, size_t size,
const std::string& location,
uint32_t location_checksum,
MemMap* mem_map);
@@ -937,7 +937,7 @@ class DexFile {
void DecodeDebugInfo0(const CodeItem* code_item, bool is_static, uint32_t method_idx,
DexDebugNewPositionCb position_cb, DexDebugNewLocalCb local_cb,
- void* context, const byte* stream, LocalInfo* local_in_reg) const;
+ void* context, const uint8_t* stream, LocalInfo* local_in_reg) const;
// Check whether a location denotes a multidex dex file. This is a very simple check: returns
// whether the string contains the separator character.
@@ -945,7 +945,7 @@ class DexFile {
// The base address of the memory mapping.
- const byte* const begin_;
+ const uint8_t* const begin_;
// The size of the underlying memory allocation in bytes.
const size_t size_;
@@ -1059,7 +1059,7 @@ std::ostream& operator<<(std::ostream& os, const Signature& sig);
// Iterate and decode class_data_item
class ClassDataItemIterator {
public:
- ClassDataItemIterator(const DexFile& dex_file, const byte* raw_class_data_item)
+ ClassDataItemIterator(const DexFile& dex_file, const uint8_t* raw_class_data_item)
: dex_file_(dex_file), pos_(0), ptr_pos_(raw_class_data_item), last_idx_(0) {
ReadClassDataHeader();
if (EndOfInstanceFieldsPos() > 0) {
@@ -1174,7 +1174,7 @@ class ClassDataItemIterator {
uint32_t GetMethodCodeItemOffset() const {
return method_.code_off_;
}
- const byte* EndDataPointer() const {
+ const uint8_t* EndDataPointer() const {
CHECK(!HasNext());
return ptr_pos_;
}
@@ -1236,7 +1236,7 @@ class ClassDataItemIterator {
const DexFile& dex_file_;
size_t pos_; // integral number of items passed
- const byte* ptr_pos_; // pointer into stream of class_data_item
+ const uint8_t* ptr_pos_; // pointer into stream of class_data_item
uint32_t last_idx_; // last read field or method index to apply delta to
DISALLOW_IMPLICIT_CONSTRUCTORS(ClassDataItemIterator);
};
@@ -1275,8 +1275,8 @@ class EncodedStaticFieldValueIterator {
};
private:
- static const byte kEncodedValueTypeMask = 0x1f; // 0b11111
- static const byte kEncodedValueArgShift = 5;
+ static constexpr uint8_t kEncodedValueTypeMask = 0x1f; // 0b11111
+ static constexpr uint8_t kEncodedValueArgShift = 5;
const DexFile& dex_file_;
Handle<mirror::DexCache>* const dex_cache_; // Dex cache to resolve literal objects.
@@ -1284,7 +1284,7 @@ class EncodedStaticFieldValueIterator {
ClassLinker* linker_; // Linker to resolve literal objects.
size_t array_size_; // Size of array.
size_t pos_; // Current position.
- const byte* ptr_; // Pointer into encoded data array.
+ const uint8_t* ptr_; // Pointer into encoded data array.
ValueType type_; // Type of current encoded value.
jvalue jval_; // Value of current encoded value.
DISALLOW_IMPLICIT_CONSTRUCTORS(EncodedStaticFieldValueIterator);
@@ -1298,7 +1298,7 @@ class CatchHandlerIterator {
CatchHandlerIterator(const DexFile::CodeItem& code_item,
const DexFile::TryItem& try_item);
- explicit CatchHandlerIterator(const byte* handler_data) {
+ explicit CatchHandlerIterator(const uint8_t* handler_data) {
Init(handler_data);
}
@@ -1313,20 +1313,20 @@ class CatchHandlerIterator {
return remaining_count_ != -1 || catch_all_;
}
// End of this set of catch blocks, convenience method to locate next set of catch blocks
- const byte* EndDataPointer() const {
+ const uint8_t* EndDataPointer() const {
CHECK(!HasNext());
return current_data_;
}
private:
void Init(const DexFile::CodeItem& code_item, int32_t offset);
- void Init(const byte* handler_data);
+ void Init(const uint8_t* handler_data);
struct CatchHandlerItem {
uint16_t type_idx_; // type index of the caught exception type
uint32_t address_; // handler address
} handler_;
- const byte *current_data_; // the current handler in dex file.
+ const uint8_t* current_data_; // the current handler in dex file.
int32_t remaining_count_; // number of handlers not read.
bool catch_all_; // is there a handler that will catch all exceptions in case
// that all typed handler does not match.
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index d0c5603cc4..134e284999 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -35,7 +35,7 @@ TEST_F(DexFileTest, Open) {
ASSERT_TRUE(dex != NULL);
}
-static const byte kBase64Map[256] = {
+static const uint8_t kBase64Map[256] = {
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
@@ -60,12 +60,12 @@ static const byte kBase64Map[256] = {
255, 255, 255, 255
};
-static inline byte* DecodeBase64(const char* src, size_t* dst_size) {
- std::vector<byte> tmp;
+static inline uint8_t* DecodeBase64(const char* src, size_t* dst_size) {
+ std::vector<uint8_t> tmp;
uint32_t t = 0, y = 0;
int g = 3;
for (size_t i = 0; src[i] != '\0'; ++i) {
- byte c = kBase64Map[src[i] & 0xFF];
+ uint8_t c = kBase64Map[src[i] & 0xFF];
if (c == 255) continue;
// the final = symbols are read and used to trim the remaining bytes
if (c == 254) {
@@ -96,7 +96,7 @@ static inline byte* DecodeBase64(const char* src, size_t* dst_size) {
*dst_size = 0;
return nullptr;
}
- std::unique_ptr<byte[]> dst(new byte[tmp.size()]);
+ std::unique_ptr<uint8_t[]> dst(new uint8_t[tmp.size()]);
if (dst_size != nullptr) {
*dst_size = tmp.size();
} else {
@@ -137,7 +137,7 @@ static const DexFile* OpenDexFileBase64(const char* base64,
// decode base64
CHECK(base64 != NULL);
size_t length;
- std::unique_ptr<byte[]> dex_bytes(DecodeBase64(base64, &length));
+ std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(base64, &length));
CHECK(dex_bytes.get() != NULL);
// write to provided file
@@ -229,7 +229,7 @@ TEST_F(DexFileTest, GetMethodSignature) {
const DexFile::ClassDef& class_def = raw->GetClassDef(0);
ASSERT_STREQ("LGetMethodSignature;", raw->GetClassDescriptor(class_def));
- const byte* class_data = raw->GetClassData(class_def);
+ const uint8_t* class_data = raw->GetClassData(class_def);
ASSERT_TRUE(class_data != NULL);
ClassDataItemIterator it(*raw, class_data);
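
The DecodeBase64 helper in this test (and its duplicate in dex_file_verifier_test.cc below) drives everything off kBase64Map, a 256-entry table mapping each ASCII byte to its 6-bit value, with 255 marking bytes to skip and 254 marking the '=' padding. A hedged sketch of how such a table can be built programmatically instead of written out by hand:

// Sketch: building a base64 reverse-lookup table like kBase64Map.
#include <array>
#include <cstdint>

std::array<uint8_t, 256> MakeBase64Map() {
  static const char kAlphabet[] =
      "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
  std::array<uint8_t, 256> map;
  map.fill(255);                         // 255: not a base64 character, skip it
  for (uint8_t i = 0; i < 64; ++i) {
    map[static_cast<uint8_t>(kAlphabet[i])] = i;
  }
  map[static_cast<uint8_t>('=')] = 254;  // 254: padding marker
  return map;
}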
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 9eba92f8f1..a3f3de8514 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -124,7 +124,7 @@ const DexFile::MethodId* DexFileVerifier::CheckLoadMethodId(uint32_t idx, const
error_stmt; \
}
-bool DexFileVerifier::Verify(const DexFile* dex_file, const byte* begin, size_t size,
+bool DexFileVerifier::Verify(const DexFile* dex_file, const uint8_t* begin, size_t size,
const char* location, std::string* error_msg) {
std::unique_ptr<DexFileVerifier> verifier(new DexFileVerifier(dex_file, begin, size, location));
if (!verifier->Verify()) {
@@ -142,7 +142,7 @@ bool DexFileVerifier::CheckShortyDescriptorMatch(char shorty_char, const char* d
ErrorStringPrintf("Invalid use of void");
return false;
}
- // Intentional fallthrough.
+ FALLTHROUGH_INTENDED;
case 'B':
case 'C':
case 'D':
@@ -175,8 +175,8 @@ bool DexFileVerifier::CheckListSize(const void* start, size_t count, size_t elem
// Check that size is not 0.
CHECK_NE(elem_size, 0U);
- const byte* range_start = reinterpret_cast<const byte*>(start);
- const byte* file_start = reinterpret_cast<const byte*>(begin_);
+ const uint8_t* range_start = reinterpret_cast<const uint8_t*>(start);
+ const uint8_t* file_start = reinterpret_cast<const uint8_t*>(begin_);
// Check for overflow.
uintptr_t max = 0 - 1;
@@ -189,8 +189,8 @@ bool DexFileVerifier::CheckListSize(const void* start, size_t count, size_t elem
return false;
}
- const byte* range_end = range_start + count * elem_size;
- const byte* file_end = file_start + size_;
+ const uint8_t* range_end = range_start + count * elem_size;
+ const uint8_t* file_end = file_start + size_;
if (UNLIKELY((range_start < file_start) || (range_end > file_end))) {
// Note: these two tests are enough as we make sure above that there's no overflow.
ErrorStringPrintf("Bad range for %s: %zx to %zx", label,
@@ -201,7 +201,7 @@ bool DexFileVerifier::CheckListSize(const void* start, size_t count, size_t elem
return true;
}
-bool DexFileVerifier::CheckList(size_t element_size, const char* label, const byte* *ptr) {
+bool DexFileVerifier::CheckList(size_t element_size, const char* label, const uint8_t* *ptr) {
// Check that the list is available. The first 4B are the count.
if (!CheckListSize(*ptr, 1, 4U, label)) {
return false;
@@ -251,7 +251,7 @@ bool DexFileVerifier::CheckHeader() {
// Compute and verify the checksum in the header.
uint32_t adler_checksum = adler32(0L, Z_NULL, 0);
const uint32_t non_sum = sizeof(header_->magic_) + sizeof(header_->checksum_);
- const byte* non_sum_ptr = reinterpret_cast<const byte*>(header_) + non_sum;
+ const uint8_t* non_sum_ptr = reinterpret_cast<const uint8_t*>(header_) + non_sum;
adler_checksum = adler32(adler_checksum, non_sum_ptr, expected_size - non_sum);
if (adler_checksum != header_->checksum_) {
ErrorStringPrintf("Bad checksum (%08x, expected %08x)", adler_checksum, header_->checksum_);
@@ -388,7 +388,7 @@ bool DexFileVerifier::CheckMap() {
uint32_t DexFileVerifier::ReadUnsignedLittleEndian(uint32_t size) {
uint32_t result = 0;
- if (LIKELY(CheckListSize(ptr_, size, sizeof(byte), "encoded_value"))) {
+ if (LIKELY(CheckListSize(ptr_, size, sizeof(uint8_t), "encoded_value"))) {
for (uint32_t i = 0; i < size; i++) {
result |= ((uint32_t) *(ptr_++)) << (i * 8);
}
@@ -398,7 +398,7 @@ uint32_t DexFileVerifier::ReadUnsignedLittleEndian(uint32_t size) {
bool DexFileVerifier::CheckAndGetHandlerOffsets(const DexFile::CodeItem* code_item,
uint32_t* handler_offsets, uint32_t handlers_size) {
- const byte* handlers_base = DexFile::GetCatchHandlerData(*code_item, 0);
+ const uint8_t* handlers_base = DexFile::GetCatchHandlerData(*code_item, 0);
for (uint32_t i = 0; i < handlers_size; i++) {
bool catch_all;
@@ -503,7 +503,7 @@ bool DexFileVerifier::CheckClassDataItemMethod(uint32_t idx, uint32_t access_fla
bool DexFileVerifier::CheckPadding(size_t offset, uint32_t aligned_offset) {
if (offset < aligned_offset) {
- if (!CheckListSize(begin_ + offset, aligned_offset - offset, sizeof(byte), "section")) {
+ if (!CheckListSize(begin_ + offset, aligned_offset - offset, sizeof(uint8_t), "section")) {
return false;
}
while (offset < aligned_offset) {
@@ -519,7 +519,7 @@ bool DexFileVerifier::CheckPadding(size_t offset, uint32_t aligned_offset) {
}
bool DexFileVerifier::CheckEncodedValue() {
- if (!CheckListSize(ptr_, 1, sizeof(byte), "encoded_value header")) {
+ if (!CheckListSize(ptr_, 1, sizeof(uint8_t), "encoded_value header")) {
return false;
}
@@ -746,7 +746,7 @@ bool DexFileVerifier::CheckIntraCodeItem() {
// Grab the end of the insns if there are no try_items.
uint32_t try_items_size = code_item->tries_size_;
if (try_items_size == 0) {
- ptr_ = reinterpret_cast<const byte*>(&insns[insns_size]);
+ ptr_ = reinterpret_cast<const uint8_t*>(&insns[insns_size]);
return true;
}
@@ -812,7 +812,7 @@ bool DexFileVerifier::CheckIntraCodeItem() {
bool DexFileVerifier::CheckIntraStringDataItem() {
uint32_t size = DecodeUnsignedLeb128(&ptr_);
- const byte* file_end = begin_ + size_;
+ const uint8_t* file_end = begin_ + size_;
for (uint32_t i = 0; i < size; i++) {
CHECK_LT(i, size); // b/15014252 Prevents hitting the impossible case below
@@ -1003,7 +1003,7 @@ bool DexFileVerifier::CheckIntraDebugInfoItem() {
}
bool DexFileVerifier::CheckIntraAnnotationItem() {
- if (!CheckListSize(ptr_, 1, sizeof(byte), "annotation visibility")) {
+ if (!CheckListSize(ptr_, 1, sizeof(uint8_t), "annotation visibility")) {
return false;
}
@@ -1090,7 +1090,7 @@ bool DexFileVerifier::CheckIntraAnnotationsDirectoryItem() {
}
// Return a pointer to the end of the annotations.
- ptr_ = reinterpret_cast<const byte*>(parameter_item);
+ ptr_ = reinterpret_cast<const uint8_t*>(parameter_item);
return true;
}
@@ -1416,7 +1416,7 @@ bool DexFileVerifier::CheckOffsetToTypeMap(size_t offset, uint16_t type) {
return true;
}
-uint16_t DexFileVerifier::FindFirstClassDataDefiner(const byte* ptr, bool* success) {
+uint16_t DexFileVerifier::FindFirstClassDataDefiner(const uint8_t* ptr, bool* success) {
ClassDataItemIterator it(*dex_file_, ptr);
*success = true;
@@ -1435,7 +1435,7 @@ uint16_t DexFileVerifier::FindFirstClassDataDefiner(const byte* ptr, bool* succe
return DexFile::kDexNoIndex16;
}
-uint16_t DexFileVerifier::FindFirstAnnotationsDirectoryDefiner(const byte* ptr, bool* success) {
+uint16_t DexFileVerifier::FindFirstAnnotationsDirectoryDefiner(const uint8_t* ptr, bool* success) {
const DexFile::AnnotationsDirectoryItem* item =
reinterpret_cast<const DexFile::AnnotationsDirectoryItem*>(ptr);
*success = true;
@@ -1759,7 +1759,7 @@ bool DexFileVerifier::CheckInterClassDefItem() {
// Check that references in class_data_item are to the right class.
if (item->class_data_off_ != 0) {
- const byte* data = begin_ + item->class_data_off_;
+ const uint8_t* data = begin_ + item->class_data_off_;
bool success;
uint16_t data_definer = FindFirstClassDataDefiner(data, &success);
if (!success) {
@@ -1773,7 +1773,7 @@ bool DexFileVerifier::CheckInterClassDefItem() {
// Check that references in annotations_directory_item are to right class.
if (item->annotations_off_ != 0) {
- const byte* data = begin_ + item->annotations_off_;
+ const uint8_t* data = begin_ + item->annotations_off_;
bool success;
uint16_t annotations_definer = FindFirstAnnotationsDirectoryDefiner(data, &success);
if (!success) {
@@ -1804,7 +1804,7 @@ bool DexFileVerifier::CheckInterAnnotationSetRefList() {
item++;
}
- ptr_ = reinterpret_cast<const byte*>(item);
+ ptr_ = reinterpret_cast<const uint8_t*>(item);
return true;
}
@@ -1834,7 +1834,7 @@ bool DexFileVerifier::CheckInterAnnotationSetItem() {
offsets++;
}
- ptr_ = reinterpret_cast<const byte*>(offsets);
+ ptr_ = reinterpret_cast<const uint8_t*>(offsets);
return true;
}
@@ -1935,7 +1935,7 @@ bool DexFileVerifier::CheckInterAnnotationsDirectoryItem() {
parameter_item++;
}
- ptr_ = reinterpret_cast<const byte*>(parameter_item);
+ ptr_ = reinterpret_cast<const uint8_t*>(parameter_item);
return true;
}
@@ -1956,7 +1956,7 @@ bool DexFileVerifier::CheckInterSectionIterate(size_t offset, uint32_t count, ui
for (uint32_t i = 0; i < count; i++) {
uint32_t new_offset = (offset + alignment_mask) & ~alignment_mask;
ptr_ = begin_ + new_offset;
- const byte* prev_ptr = ptr_;
+ const uint8_t* prev_ptr = ptr_;
// Check depending on the section type.
switch (type) {
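
CheckHeader, touched above, recomputes the dex checksum as an Adler-32 over everything after the magic and checksum fields and compares it with header_->checksum_. A standalone sketch of that verification, assuming dex points at a complete in-memory image and size is the header's file_size_ value:

// Sketch: verify a dex checksum with zlib's adler32, skipping magic_ + checksum_.
#include <cstdint>
#include <cstring>
#include <zlib.h>

bool DexChecksumMatches(const uint8_t* dex, size_t size) {
  constexpr size_t kNonSum = 8 + 4;  // sizeof(magic_) + sizeof(checksum_)
  if (size <= kNonSum) {
    return false;
  }
  uint32_t stored;
  memcpy(&stored, dex + 8, sizeof(stored));  // checksum_ follows the 8-byte magic
  uLong adler = adler32(0L, Z_NULL, 0);
  adler = adler32(adler, dex + kNonSum, static_cast<uInt>(size - kNonSum));
  return static_cast<uint32_t>(adler) == stored;
}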
diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h
index 606da54225..18bf2e7a88 100644
--- a/runtime/dex_file_verifier.h
+++ b/runtime/dex_file_verifier.h
@@ -26,7 +26,7 @@ namespace art {
class DexFileVerifier {
public:
- static bool Verify(const DexFile* dex_file, const byte* begin, size_t size,
+ static bool Verify(const DexFile* dex_file, const uint8_t* begin, size_t size,
const char* location, std::string* error_msg);
const std::string& FailureReason() const {
@@ -34,7 +34,7 @@ class DexFileVerifier {
}
private:
- DexFileVerifier(const DexFile* dex_file, const byte* begin, size_t size, const char* location)
+ DexFileVerifier(const DexFile* dex_file, const uint8_t* begin, size_t size, const char* location)
: dex_file_(dex_file), begin_(begin), size_(size), location_(location),
header_(&dex_file->GetHeader()), ptr_(NULL), previous_item_(NULL) {
}
@@ -45,7 +45,7 @@ class DexFileVerifier {
bool CheckListSize(const void* start, size_t count, size_t element_size, const char* label);
// Check a list. The head is assumed to be at *ptr, and elements to be of size element_size. If
// successful, the ptr will be moved forward the amount covered by the list.
- bool CheckList(size_t element_size, const char* label, const byte* *ptr);
+ bool CheckList(size_t element_size, const char* label, const uint8_t* *ptr);
// Checks whether the offset is zero (when size is zero) or that the offset falls within the area
// claimed by the file.
bool CheckValidOffsetAndSize(uint32_t offset, uint32_t size, const char* label);
@@ -81,8 +81,8 @@ class DexFileVerifier {
// Note: as sometimes kDexNoIndex16, being 0xFFFF, is a valid return value, we need an
// additional out parameter to signal any errors loading an index.
- uint16_t FindFirstClassDataDefiner(const byte* ptr, bool* success);
- uint16_t FindFirstAnnotationsDirectoryDefiner(const byte* ptr, bool* success);
+ uint16_t FindFirstClassDataDefiner(const uint8_t* ptr, bool* success);
+ uint16_t FindFirstAnnotationsDirectoryDefiner(const uint8_t* ptr, bool* success);
bool CheckInterStringIdItem();
bool CheckInterTypeIdItem();
@@ -112,13 +112,13 @@ class DexFileVerifier {
__attribute__((__format__(__printf__, 2, 3))) COLD_ATTR;
const DexFile* const dex_file_;
- const byte* const begin_;
+ const uint8_t* const begin_;
const size_t size_;
const char* const location_;
const DexFile::Header* const header_;
AllocationTrackingSafeMap<uint32_t, uint16_t, kAllocatorTagDexFileVerifier> offset_to_type_map_;
- const byte* ptr_;
+ const uint8_t* ptr_;
const void* previous_item_;
std::string failure_reason_;
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index d475d426ff..addd94833e 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -30,7 +30,7 @@ namespace art {
class DexFileVerifierTest : public CommonRuntimeTest {};
-static const byte kBase64Map[256] = {
+static const uint8_t kBase64Map[256] = {
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
@@ -55,12 +55,12 @@ static const byte kBase64Map[256] = {
255, 255, 255, 255
};
-static inline byte* DecodeBase64(const char* src, size_t* dst_size) {
- std::vector<byte> tmp;
+static inline uint8_t* DecodeBase64(const char* src, size_t* dst_size) {
+ std::vector<uint8_t> tmp;
uint32_t t = 0, y = 0;
int g = 3;
for (size_t i = 0; src[i] != '\0'; ++i) {
- byte c = kBase64Map[src[i] & 0xFF];
+ uint8_t c = kBase64Map[src[i] & 0xFF];
if (c == 255) continue;
// the final = symbols are read and used to trim the remaining bytes
if (c == 254) {
@@ -91,7 +91,7 @@ static inline byte* DecodeBase64(const char* src, size_t* dst_size) {
*dst_size = 0;
return nullptr;
}
- std::unique_ptr<byte[]> dst(new byte[tmp.size()]);
+ std::unique_ptr<uint8_t[]> dst(new uint8_t[tmp.size()]);
if (dst_size != nullptr) {
*dst_size = tmp.size();
} else {
@@ -106,7 +106,7 @@ static const DexFile* OpenDexFileBase64(const char* base64, const char* location
// decode base64
CHECK(base64 != NULL);
size_t length;
- std::unique_ptr<byte[]> dex_bytes(DecodeBase64(base64, &length));
+ std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(base64, &length));
CHECK(dex_bytes.get() != NULL);
// write to provided file
@@ -153,17 +153,17 @@ TEST_F(DexFileVerifierTest, GoodDex) {
ASSERT_TRUE(raw.get() != nullptr) << error_msg;
}
-static void FixUpChecksum(byte* dex_file) {
+static void FixUpChecksum(uint8_t* dex_file) {
DexFile::Header* header = reinterpret_cast<DexFile::Header*>(dex_file);
uint32_t expected_size = header->file_size_;
uint32_t adler_checksum = adler32(0L, Z_NULL, 0);
const uint32_t non_sum = sizeof(DexFile::Header::magic_) + sizeof(DexFile::Header::checksum_);
- const byte* non_sum_ptr = dex_file + non_sum;
+ const uint8_t* non_sum_ptr = dex_file + non_sum;
adler_checksum = adler32(adler_checksum, non_sum_ptr, expected_size - non_sum);
header->checksum_ = adler_checksum;
}
-static const DexFile* FixChecksumAndOpen(byte* bytes, size_t length, const char* location,
+static const DexFile* FixChecksumAndOpen(uint8_t* bytes, size_t length, const char* location,
std::string* error_msg) {
// Check data.
CHECK(bytes != nullptr);
@@ -196,7 +196,7 @@ static bool ModifyAndLoad(const char* location, size_t offset, uint8_t new_val,
std::string* error_msg) {
// Decode base64.
size_t length;
- std::unique_ptr<byte[]> dex_bytes(DecodeBase64(kGoodTestDex, &length));
+ std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(kGoodTestDex, &length));
CHECK(dex_bytes.get() != NULL);
// Make modifications.
diff --git a/runtime/dex_instruction-inl.h b/runtime/dex_instruction-inl.h
index ad9491fda6..dd65f2c0c6 100644
--- a/runtime/dex_instruction-inl.h
+++ b/runtime/dex_instruction-inl.h
@@ -460,11 +460,21 @@ inline void Instruction::GetVarArgs(uint32_t arg[5], uint16_t inst_data) const {
* copies of those.) Note that cases 5..2 fall through.
*/
switch (count) {
- case 5: arg[4] = InstA(inst_data);
- case 4: arg[3] = (regList >> 12) & 0x0f;
- case 3: arg[2] = (regList >> 8) & 0x0f;
- case 2: arg[1] = (regList >> 4) & 0x0f;
- case 1: arg[0] = regList & 0x0f; break;
+ case 5:
+ arg[4] = InstA(inst_data);
+ FALLTHROUGH_INTENDED;
+ case 4:
+ arg[3] = (regList >> 12) & 0x0f;
+ FALLTHROUGH_INTENDED;
+ case 3:
+ arg[2] = (regList >> 8) & 0x0f;
+ FALLTHROUGH_INTENDED;
+ case 2:
+ arg[1] = (regList >> 4) & 0x0f;
+ FALLTHROUGH_INTENDED;
+ case 1:
+ arg[0] = regList & 0x0f;
+ break;
default: // case 0
break; // Valid, but no need to do anything.
}
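
The GetVarArgs rewrite keeps the intentional fall-through (case 5 fills arg[4], then drops into case 4, and so on) but now states it explicitly with FALLTHROUGH_INTENDED so fall-through diagnostics stay quiet. The macro is defined in runtime/base/macros.h; a hedged sketch of the Chromium-style definition it presumably follows:

// Sketch only: a typical FALLTHROUGH_INTENDED definition.
#if defined(__clang__) && defined(__has_warning)
#if __has_feature(cxx_attributes) && __has_warning("-Wimplicit-fallthrough")
#define FALLTHROUGH_INTENDED [[clang::fallthrough]]  // NOLINT
#endif
#endif
#ifndef FALLTHROUGH_INTENDED
#define FALLTHROUGH_INTENDED do { } while (0)  // no-op on other compilers
#endif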
diff --git a/runtime/dex_instruction.cc b/runtime/dex_instruction.cc
index 0a71d621e1..7e775f4ed3 100644
--- a/runtime/dex_instruction.cc
+++ b/runtime/dex_instruction.cc
@@ -111,7 +111,7 @@ size_t Instruction::SizeInCodeUnitsComplexOpcode() const {
if ((*insns & 0xFF) == 0) {
return 1; // NOP.
} else {
- LOG(FATAL) << "Unreachable: " << DumpString(NULL);
+ LOG(FATAL) << "Unreachable: " << DumpString(nullptr);
return 0;
}
}
@@ -161,21 +161,23 @@ std::string Instruction::DumpString(const DexFile* file) const {
case k21c: {
switch (Opcode()) {
case CONST_STRING:
- if (file != NULL) {
+ if (file != nullptr) {
uint32_t string_idx = VRegB_21c();
os << StringPrintf("const-string v%d, %s // string@%d", VRegA_21c(),
PrintableString(file->StringDataByIdx(string_idx)).c_str(), string_idx);
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
case CHECK_CAST:
case CONST_CLASS:
case NEW_INSTANCE:
- if (file != NULL) {
+ if (file != nullptr) {
uint32_t type_idx = VRegB_21c();
os << opcode << " v" << static_cast<int>(VRegA_21c()) << ", " << PrettyType(type_idx, *file)
<< " // type@" << type_idx;
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
case SGET:
case SGET_WIDE:
case SGET_OBJECT:
@@ -183,12 +185,13 @@ std::string Instruction::DumpString(const DexFile* file) const {
case SGET_BYTE:
case SGET_CHAR:
case SGET_SHORT:
- if (file != NULL) {
+ if (file != nullptr) {
uint32_t field_idx = VRegB_21c();
os << opcode << " v" << static_cast<int>(VRegA_21c()) << ", " << PrettyField(field_idx, *file, true)
<< " // field@" << field_idx;
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
case SPUT:
case SPUT_WIDE:
case SPUT_OBJECT:
@@ -196,12 +199,13 @@ std::string Instruction::DumpString(const DexFile* file) const {
case SPUT_BYTE:
case SPUT_CHAR:
case SPUT_SHORT:
- if (file != NULL) {
+ if (file != nullptr) {
uint32_t field_idx = VRegB_21c();
os << opcode << " v" << static_cast<int>(VRegA_21c()) << ", " << PrettyField(field_idx, *file, true)
<< " // field@" << field_idx;
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
default:
os << StringPrintf("%s v%d, thing@%d", opcode, VRegA_21c(), VRegB_21c());
break;
@@ -221,20 +225,22 @@ std::string Instruction::DumpString(const DexFile* file) const {
case IGET_BYTE:
case IGET_CHAR:
case IGET_SHORT:
- if (file != NULL) {
+ if (file != nullptr) {
uint32_t field_idx = VRegC_22c();
os << opcode << " v" << static_cast<int>(VRegA_22c()) << ", v" << static_cast<int>(VRegB_22c()) << ", "
<< PrettyField(field_idx, *file, true) << " // field@" << field_idx;
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
case IGET_QUICK:
case IGET_OBJECT_QUICK:
- if (file != NULL) {
+ if (file != nullptr) {
uint32_t field_idx = VRegC_22c();
os << opcode << " v" << static_cast<int>(VRegA_22c()) << ", v" << static_cast<int>(VRegB_22c()) << ", "
<< "// offset@" << field_idx;
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
case IPUT:
case IPUT_WIDE:
case IPUT_OBJECT:
@@ -242,34 +248,38 @@ std::string Instruction::DumpString(const DexFile* file) const {
case IPUT_BYTE:
case IPUT_CHAR:
case IPUT_SHORT:
- if (file != NULL) {
+ if (file != nullptr) {
uint32_t field_idx = VRegC_22c();
os << opcode << " v" << static_cast<int>(VRegA_22c()) << ", v" << static_cast<int>(VRegB_22c()) << ", "
<< PrettyField(field_idx, *file, true) << " // field@" << field_idx;
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
case IPUT_QUICK:
case IPUT_OBJECT_QUICK:
- if (file != NULL) {
+ if (file != nullptr) {
uint32_t field_idx = VRegC_22c();
os << opcode << " v" << static_cast<int>(VRegA_22c()) << ", v" << static_cast<int>(VRegB_22c()) << ", "
<< "// offset@" << field_idx;
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
case INSTANCE_OF:
- if (file != NULL) {
+ if (file != nullptr) {
uint32_t type_idx = VRegC_22c();
os << opcode << " v" << static_cast<int>(VRegA_22c()) << ", v" << static_cast<int>(VRegB_22c()) << ", "
<< PrettyType(type_idx, *file) << " // type@" << type_idx;
break;
}
+ FALLTHROUGH_INTENDED;
case NEW_ARRAY:
- if (file != NULL) {
+ if (file != nullptr) {
uint32_t type_idx = VRegC_22c();
os << opcode << " v" << static_cast<int>(VRegA_22c()) << ", v" << static_cast<int>(VRegB_22c()) << ", "
<< PrettyType(type_idx, *file) << " // type@" << type_idx;
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
default:
os << StringPrintf("%s v%d, v%d, thing@%d", opcode, VRegA_22c(), VRegB_22c(), VRegC_22c());
break;
@@ -283,7 +293,7 @@ std::string Instruction::DumpString(const DexFile* file) const {
case k31c:
if (Opcode() == CONST_STRING_JUMBO) {
uint32_t string_idx = VRegB_31c();
- if (file != NULL) {
+ if (file != nullptr) {
os << StringPrintf("%s v%d, %s // string@%d", opcode, VRegA_31c(),
PrintableString(file->StringDataByIdx(string_idx)).c_str(),
string_idx);
@@ -317,7 +327,7 @@ std::string Instruction::DumpString(const DexFile* file) const {
case INVOKE_DIRECT:
case INVOKE_STATIC:
case INVOKE_INTERFACE:
- if (file != NULL) {
+ if (file != nullptr) {
os << opcode << " {";
uint32_t method_idx = VRegB_35c();
for (size_t i = 0; i < VRegA_35c(); ++i) {
@@ -328,9 +338,10 @@ std::string Instruction::DumpString(const DexFile* file) const {
}
os << "}, " << PrettyMethod(method_idx, *file) << " // method@" << method_idx;
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
case INVOKE_VIRTUAL_QUICK:
- if (file != NULL) {
+ if (file != nullptr) {
os << opcode << " {";
uint32_t method_idx = VRegB_35c();
for (size_t i = 0; i < VRegA_35c(); ++i) {
@@ -341,7 +352,8 @@ std::string Instruction::DumpString(const DexFile* file) const {
}
os << "}, // vtable@" << method_idx;
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
default:
os << opcode << " {v" << arg[0] << ", v" << arg[1] << ", v" << arg[2]
<< ", v" << arg[3] << ", v" << arg[4] << "}, thing@" << VRegB_35c();
@@ -356,19 +368,21 @@ std::string Instruction::DumpString(const DexFile* file) const {
case INVOKE_DIRECT_RANGE:
case INVOKE_STATIC_RANGE:
case INVOKE_INTERFACE_RANGE:
- if (file != NULL) {
+ if (file != nullptr) {
uint32_t method_idx = VRegB_3rc();
os << StringPrintf("%s, {v%d .. v%d}, ", opcode, VRegC_3rc(), (VRegC_3rc() + VRegA_3rc() - 1))
<< PrettyMethod(method_idx, *file) << " // method@" << method_idx;
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
case INVOKE_VIRTUAL_RANGE_QUICK:
- if (file != NULL) {
+ if (file != nullptr) {
uint32_t method_idx = VRegB_3rc();
os << StringPrintf("%s, {v%d .. v%d}, ", opcode, VRegC_3rc(), (VRegC_3rc() + VRegA_3rc() - 1))
<< "// vtable@" << method_idx;
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
default:
os << StringPrintf("%s, {v%d .. v%d}, thing@%d", opcode, VRegC_3rc(),
(VRegC_3rc() + VRegA_3rc() - 1), VRegB_3rc());
diff --git a/runtime/dex_method_iterator.h b/runtime/dex_method_iterator.h
index 806266d26d..14e316f2a5 100644
--- a/runtime/dex_method_iterator.h
+++ b/runtime/dex_method_iterator.h
@@ -139,7 +139,7 @@ class DexMethodIterator {
uint32_t dex_file_index_;
uint32_t class_def_index_;
const DexFile::ClassDef* class_def_;
- const byte* class_data_;
+ const uint8_t* class_data_;
std::unique_ptr<ClassDataItemIterator> it_;
bool direct_method_;
};
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 3b8358d12e..c3a25595cd 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -43,7 +43,7 @@ extern "C" {
struct JITCodeEntry {
JITCodeEntry* next_;
JITCodeEntry* prev_;
- const byte *symfile_addr_;
+ const uint8_t *symfile_addr_;
uint64_t symfile_size_;
};
@@ -68,7 +68,7 @@ extern "C" {
}
-static JITCodeEntry* CreateCodeEntry(const byte *symfile_addr,
+static JITCodeEntry* CreateCodeEntry(const uint8_t *symfile_addr,
uintptr_t symfile_size) {
JITCodeEntry* entry = new JITCodeEntry;
entry->symfile_addr_ = symfile_addr;
@@ -264,7 +264,7 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
}
if (!CheckAndSet(GetDynamicProgramHeader().p_offset, "dynamic section",
- reinterpret_cast<byte**>(&dynamic_section_start_), error_msg)) {
+ reinterpret_cast<uint8_t**>(&dynamic_section_start_), error_msg)) {
return false;
}
@@ -279,14 +279,14 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
switch (section_header->sh_type) {
case SHT_SYMTAB: {
if (!CheckAndSet(section_header->sh_offset, "symtab",
- reinterpret_cast<byte**>(&symtab_section_start_), error_msg)) {
+ reinterpret_cast<uint8_t**>(&symtab_section_start_), error_msg)) {
return false;
}
break;
}
case SHT_DYNSYM: {
if (!CheckAndSet(section_header->sh_offset, "dynsym",
- reinterpret_cast<byte**>(&dynsym_section_start_), error_msg)) {
+ reinterpret_cast<uint8_t**>(&dynsym_section_start_), error_msg)) {
return false;
}
break;
@@ -298,7 +298,7 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
const char* header_name = GetString(*shstrtab_section_header, section_header->sh_name);
if (strncmp(".dynstr", header_name, 8) == 0) {
if (!CheckAndSet(section_header->sh_offset, "dynstr",
- reinterpret_cast<byte**>(&dynstr_section_start_), error_msg)) {
+ reinterpret_cast<uint8_t**>(&dynstr_section_start_), error_msg)) {
return false;
}
}
@@ -307,7 +307,7 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
const char* header_name = GetString(*shstrtab_section_header, section_header->sh_name);
if (strncmp(".strtab", header_name, 8) == 0) {
if (!CheckAndSet(section_header->sh_offset, "strtab",
- reinterpret_cast<byte**>(&strtab_section_start_), error_msg)) {
+ reinterpret_cast<uint8_t**>(&strtab_section_start_), error_msg)) {
return false;
}
}
@@ -315,7 +315,7 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
break;
}
case SHT_DYNAMIC: {
- if (reinterpret_cast<byte*>(dynamic_section_start_) !=
+ if (reinterpret_cast<uint8_t*>(dynamic_section_start_) !=
Begin() + section_header->sh_offset) {
LOG(WARNING) << "Failed to find matching SHT_DYNAMIC for PT_DYNAMIC in "
<< file_->GetPath() << ": " << std::hex
@@ -327,7 +327,7 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
}
case SHT_HASH: {
if (!CheckAndSet(section_header->sh_offset, "hash section",
- reinterpret_cast<byte**>(&hash_section_start_), error_msg)) {
+ reinterpret_cast<uint8_t**>(&hash_section_start_), error_msg)) {
return false;
}
break;
@@ -365,7 +365,7 @@ template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_
bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
::CheckAndSet(Elf32_Off offset, const char* label,
- byte** target, std::string* error_msg) {
+ uint8_t** target, std::string* error_msg) {
if (Begin() + offset >= End()) {
*error_msg = StringPrintf("Offset %d is out of range for %s in ELF file: '%s'", offset, label,
file_->GetPath().c_str());
@@ -380,7 +380,7 @@ template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_
typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::CheckSectionsLinked(const byte* source, const byte* target) const {
+ ::CheckSectionsLinked(const uint8_t* source, const uint8_t* target) const {
// Only works in whole-program mode, as we need to iterate over the sections.
// Note that we normally can't search by type, as duplicates are allowed for most section types.
if (program_header_only_) {
@@ -449,8 +449,8 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
}
// The symtab should link to the strtab.
- if (!CheckSectionsLinked(reinterpret_cast<const byte*>(symtab_section_start_),
- reinterpret_cast<const byte*>(strtab_section_start_))) {
+ if (!CheckSectionsLinked(reinterpret_cast<const uint8_t*>(symtab_section_start_),
+ reinterpret_cast<const uint8_t*>(strtab_section_start_))) {
*error_msg = StringPrintf("Symtab is not linked to the strtab in ELF file: '%s'",
file_->GetPath().c_str());
return false;
@@ -475,8 +475,8 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
}
// And the hash section should be linking to the dynsym.
- if (!CheckSectionsLinked(reinterpret_cast<const byte*>(hash_section_start_),
- reinterpret_cast<const byte*>(dynsym_section_start_))) {
+ if (!CheckSectionsLinked(reinterpret_cast<const uint8_t*>(hash_section_start_),
+ reinterpret_cast<const uint8_t*>(dynsym_section_start_))) {
*error_msg = StringPrintf("Hash section is not linked to the dynstr in ELF file: '%s'",
file_->GetPath().c_str());
return false;
@@ -637,7 +637,7 @@ Elf_Ehdr& ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-byte* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
+uint8_t* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
::GetProgramHeadersStart() const {
CHECK(program_headers_start_ != nullptr); // Header has been set in Setup. This is a sanity
@@ -648,7 +648,7 @@ byte* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-byte* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
+uint8_t* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
::GetSectionHeadersStart() const {
CHECK(!program_header_only_); // Only used in "full" mode.
@@ -813,7 +813,7 @@ Elf_Phdr* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
::GetProgramHeader(Elf_Word i) const {
CHECK_LT(i, GetProgramHeaderNum()) << file_->GetPath(); // Sanity check for caller.
- byte* program_header = GetProgramHeadersStart() + (i * GetHeader().e_phentsize);
+ uint8_t* program_header = GetProgramHeadersStart() + (i * GetHeader().e_phentsize);
if (program_header >= End()) {
return nullptr; // Failure condition.
}
@@ -856,7 +856,7 @@ Elf_Shdr* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
if (i >= GetSectionHeaderNum()) {
return nullptr; // Failure condition.
}
- byte* section_header = GetSectionHeadersStart() + (i * GetHeader().e_shentsize);
+ uint8_t* section_header = GetSectionHeadersStart() + (i * GetHeader().e_shentsize);
if (section_header >= End()) {
return nullptr; // Failure condition.
}
@@ -907,7 +907,7 @@ Elf_Shdr* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-const byte* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
+const uint8_t* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
::FindDynamicSymbolAddress(const std::string& symbol_name) const {
// Check that we have a hash section.
@@ -1133,8 +1133,8 @@ const char* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
if (i == 0) {
return nullptr;
}
- byte* strings = Begin() + string_section.sh_offset;
- byte* string = strings + i;
+ uint8_t* strings = Begin() + string_section.sh_offset;
+ uint8_t* string = strings + i;
if (string >= End()) {
return nullptr;
}
@@ -1361,8 +1361,8 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
}
size_t file_length = static_cast<size_t>(temp_file_length);
if (!reserved) {
- byte* reserve_base = ((program_header->p_vaddr != 0) ?
- reinterpret_cast<byte*>(program_header->p_vaddr) : nullptr);
+ uint8_t* reserve_base = ((program_header->p_vaddr != 0) ?
+ reinterpret_cast<uint8_t*>(program_header->p_vaddr) : nullptr);
std::string reservation_name("ElfFile reservation for ");
reservation_name += file_->GetPath();
std::unique_ptr<MemMap> reserve(MemMap::MapAnonymous(reservation_name.c_str(),
@@ -1384,7 +1384,7 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
if (program_header->p_memsz == 0) {
continue;
}
- byte* p_vaddr = base_address_ + program_header->p_vaddr;
+ uint8_t* p_vaddr = base_address_ + program_header->p_vaddr;
int prot = 0;
if (executable && ((program_header->p_flags & PF_X) != 0)) {
prot |= PROT_EXEC;
@@ -1431,7 +1431,7 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
}
// Now that we are done loading, .dynamic should be in memory to find .dynstr, .dynsym, .hash
- byte* dsptr = base_address_ + GetDynamicProgramHeader().p_vaddr;
+ uint8_t* dsptr = base_address_ + GetDynamicProgramHeader().p_vaddr;
if ((dsptr < Begin() || dsptr >= End()) && !ValidPointer(dsptr)) {
*error_msg = StringPrintf("dynamic section address invalid in ELF file %s",
file_->GetPath().c_str());
@@ -1441,7 +1441,7 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
for (Elf_Word i = 0; i < GetDynamicNum(); i++) {
Elf_Dyn& elf_dyn = GetDynamic(i);
- byte* d_ptr = base_address_ + elf_dyn.d_un.d_ptr;
+ uint8_t* d_ptr = base_address_ + elf_dyn.d_un.d_ptr;
switch (elf_dyn.d_tag) {
case DT_HASH: {
if (!ValidPointer(d_ptr)) {
@@ -1500,7 +1500,7 @@ template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_
typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::ValidPointer(const byte* start) const {
+ ::ValidPointer(const uint8_t* start) const {
for (size_t i = 0; i < segments_.size(); ++i) {
const MemMap* segment = segments_[i];
if (segment->Begin() <= start && start < segment->End()) {
@@ -1550,7 +1550,7 @@ struct PACKED(1) FDE32 {
};
static FDE32* NextFDE(FDE32* frame) {
- byte* fde_bytes = reinterpret_cast<byte*>(frame);
+ uint8_t* fde_bytes = reinterpret_cast<uint8_t*>(frame);
fde_bytes += frame->GetLength();
return reinterpret_cast<FDE32*>(fde_bytes);
}
@@ -1572,7 +1572,7 @@ struct PACKED(1) FDE64 {
};
static FDE64* NextFDE(FDE64* frame) {
- byte* fde_bytes = reinterpret_cast<byte*>(frame);
+ uint8_t* fde_bytes = reinterpret_cast<uint8_t*>(frame);
fde_bytes += frame->GetLength();
return reinterpret_cast<FDE64*>(fde_bytes);
}
@@ -1582,7 +1582,7 @@ static bool IsFDE(FDE64* frame) {
}
static bool FixupEHFrame(off_t base_address_delta,
- byte* eh_frame, size_t eh_frame_size) {
+ uint8_t* eh_frame, size_t eh_frame_size) {
if (*(reinterpret_cast<uint32_t*>(eh_frame)) == 0xffffffff) {
FDE64* last_frame = reinterpret_cast<FDE64*>(eh_frame + eh_frame_size);
FDE64* frame = NextFDE(reinterpret_cast<FDE64*>(eh_frame));
@@ -1787,8 +1787,8 @@ class DebugTag {
~DebugTag() {}
// Creates a new tag and moves data pointer up to the start of the next one.
// nullptr means error.
- static DebugTag* Create(const byte** data_pointer) {
- const byte* data = *data_pointer;
+ static DebugTag* Create(const uint8_t** data_pointer) {
+ const uint8_t* data = *data_pointer;
uint32_t index = DecodeUnsignedLeb128(&data);
std::unique_ptr<DebugTag> tag(new DebugTag(index));
tag->size_ = static_cast<uint32_t>(
@@ -1867,7 +1867,7 @@ class DebugTag {
class DebugAbbrev {
public:
~DebugAbbrev() {}
- static DebugAbbrev* Create(const byte* dbg_abbrev, size_t dbg_abbrev_size) {
+ static DebugAbbrev* Create(const uint8_t* dbg_abbrev, size_t dbg_abbrev_size) {
std::unique_ptr<DebugAbbrev> abbrev(new DebugAbbrev(dbg_abbrev, dbg_abbrev + dbg_abbrev_size));
if (!abbrev->ReadAtOffset(0)) {
return nullptr;
@@ -1878,7 +1878,7 @@ class DebugAbbrev {
bool ReadAtOffset(uint32_t abbrev_offset) {
tags_.clear();
tag_list_.clear();
- const byte* dbg_abbrev = begin_ + abbrev_offset;
+ const uint8_t* dbg_abbrev = begin_ + abbrev_offset;
while (dbg_abbrev < end_ && *dbg_abbrev != 0) {
std::unique_ptr<DebugTag> tag(DebugTag::Create(&dbg_abbrev));
if (tag.get() == nullptr) {
@@ -1891,7 +1891,7 @@ class DebugAbbrev {
return true;
}
- DebugTag* ReadTag(const byte* entry) {
+ DebugTag* ReadTag(const uint8_t* entry) {
uint32_t tag_num = DecodeUnsignedLeb128(&entry);
auto it = tags_.find(tag_num);
if (it == tags_.end()) {
@@ -1903,9 +1903,9 @@ class DebugAbbrev {
}
private:
- DebugAbbrev(const byte* begin, const byte* end) : begin_(begin), end_(end) {}
- const byte* begin_;
- const byte* end_;
+ DebugAbbrev(const uint8_t* begin, const uint8_t* end) : begin_(begin), end_(end) {}
+ const uint8_t* begin_;
+ const uint8_t* end_;
std::map<uint32_t, uint32_t> tags_;
std::vector<std::unique_ptr<DebugTag>> tag_list_;
};
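DebugTag::Create above advances through .debug_abbrev with DecodeUnsignedLeb128. As a refresher, here is a minimal sketch of standard unsigned LEB128 decoding; it is not ART's helper from leb128.h, just the same scheme, and it assumes the decoded value fits in 32 bits.

#include <cstdint>

// Sketch of unsigned LEB128 decoding as used by DWARF: each byte carries
// 7 payload bits, least significant group first; the high bit of a byte is
// set while more bytes follow.
static uint32_t DecodeUleb128Sketch(const uint8_t** data) {
  const uint8_t* ptr = *data;
  uint32_t result = 0;
  uint32_t shift = 0;
  uint8_t cur;
  do {
    cur = *ptr++;
    result |= static_cast<uint32_t>(cur & 0x7f) << shift;
    shift += 7;
  } while ((cur & 0x80) != 0);
  *data = ptr;  // advance the caller's cursor past the encoded value
  return result;
}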
@@ -1934,7 +1934,7 @@ class DebugInfoIterator {
if (reinterpret_cast<DebugInfoHeader*>(current_entry_) >= next_cu_) {
current_cu_ = next_cu_;
next_cu_ = GetNextCu(current_cu_);
- current_entry_ = reinterpret_cast<byte*>(current_cu_) + sizeof(DebugInfoHeader);
+ current_entry_ = reinterpret_cast<uint8_t*>(current_cu_) + sizeof(DebugInfoHeader);
reread_abbrev = true;
}
if (current_entry_ >= last_entry_) {
@@ -1956,7 +1956,7 @@ class DebugInfoIterator {
const DebugTag* GetCurrentTag() {
return const_cast<DebugTag*>(current_tag_);
}
- byte* GetPointerToField(uint8_t dwarf_field) {
+ uint8_t* GetPointerToField(uint8_t dwarf_field) {
if (current_tag_ == nullptr || current_entry_ == nullptr || current_entry_ >= last_entry_) {
return nullptr;
}
@@ -1972,7 +1972,7 @@ class DebugInfoIterator {
private:
static DebugInfoHeader* GetNextCu(DebugInfoHeader* hdr) {
- byte* hdr_byte = reinterpret_cast<byte*>(hdr);
+ uint8_t* hdr_byte = reinterpret_cast<uint8_t*>(hdr);
return reinterpret_cast<DebugInfoHeader*>(hdr_byte + sizeof(uint32_t) + hdr->unit_length);
}
@@ -1980,14 +1980,14 @@ class DebugInfoIterator {
: abbrev_(abbrev),
current_cu_(header),
next_cu_(GetNextCu(header)),
- last_entry_(reinterpret_cast<byte*>(header) + frame_size),
- current_entry_(reinterpret_cast<byte*>(header) + sizeof(DebugInfoHeader)),
+ last_entry_(reinterpret_cast<uint8_t*>(header) + frame_size),
+ current_entry_(reinterpret_cast<uint8_t*>(header) + sizeof(DebugInfoHeader)),
current_tag_(abbrev_->ReadTag(current_entry_)) {}
DebugAbbrev* abbrev_;
DebugInfoHeader* current_cu_;
DebugInfoHeader* next_cu_;
- byte* last_entry_;
- byte* current_entry_;
+ uint8_t* last_entry_;
+ uint8_t* current_entry_;
DebugTag* current_tag_;
};
@@ -2437,7 +2437,7 @@ ElfFile* ElfFile::Open(File* file, bool writable, bool program_header_only, std:
if (map == nullptr || map->Size() != EI_NIDENT) {
return nullptr;
}
- byte *header = map->Begin();
+ uint8_t* header = map->Begin();
if (header[EI_CLASS] == ELFCLASS64) {
ElfFileImpl64* elf_file_impl = ElfFileImpl64::Open(file, writable, program_header_only, error_msg);
if (elf_file_impl == nullptr)
@@ -2468,7 +2468,7 @@ ElfFile* ElfFile::Open(File* file, int mmap_prot, int mmap_flags, std::string* e
if (map == nullptr || map->Size() != EI_NIDENT) {
return nullptr;
}
- byte *header = map->Begin();
+ uint8_t* header = map->Begin();
if (header[EI_CLASS] == ELFCLASS64) {
ElfFileImpl64* elf_file_impl = ElfFileImpl64::Open(file, mmap_prot, mmap_flags, error_msg);
if (elf_file_impl == nullptr)
@@ -2501,7 +2501,7 @@ bool ElfFile::Load(bool executable, std::string* error_msg) {
DELEGATE_TO_IMPL(Load, executable, error_msg);
}
-const byte* ElfFile::FindDynamicSymbolAddress(const std::string& symbol_name) const {
+const uint8_t* ElfFile::FindDynamicSymbolAddress(const std::string& symbol_name) const {
DELEGATE_TO_IMPL(FindDynamicSymbolAddress, symbol_name);
}
@@ -2509,11 +2509,11 @@ size_t ElfFile::Size() const {
DELEGATE_TO_IMPL(Size);
}
-byte* ElfFile::Begin() const {
+uint8_t* ElfFile::Begin() const {
DELEGATE_TO_IMPL(Begin);
}
-byte* ElfFile::End() const {
+uint8_t* ElfFile::End() const {
DELEGATE_TO_IMPL(End);
}
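For context, the byte and word spellings removed throughout this file were plain aliases, so the rename is type-for-type and changes no layout or ABI. A hedged sketch of the kind of definitions being retired; the authoritative ones live in runtime/globals.h and may be worded differently there:

#include <cstdint>
#include <cstddef>

// Assumed historical aliases that this change makes redundant.
typedef uint8_t byte;                                    // now spelled uint8_t at each use site
typedef intptr_t word;                                   // now spelled intptr_t / uintptr_t as appropriate
static constexpr size_t kPointerSize = sizeof(void*);    // now written out as sizeof(void*)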
diff --git a/runtime/elf_file.h b/runtime/elf_file.h
index ea6538b34a..a7f3056cae 100644
--- a/runtime/elf_file.h
+++ b/runtime/elf_file.h
@@ -40,13 +40,13 @@ class ElfFile {
// Load segments into memory based on PT_LOAD program headers
bool Load(bool executable, std::string* error_msg);
- const byte* FindDynamicSymbolAddress(const std::string& symbol_name) const;
+ const uint8_t* FindDynamicSymbolAddress(const std::string& symbol_name) const;
size_t Size() const;
- byte* Begin() const;
+ uint8_t* Begin() const;
- byte* End() const;
+ uint8_t* End() const;
const File& GetFile() const;
diff --git a/runtime/elf_file_impl.h b/runtime/elf_file_impl.h
index 942dc29128..a2fc422cea 100644
--- a/runtime/elf_file_impl.h
+++ b/runtime/elf_file_impl.h
@@ -46,11 +46,11 @@ class ElfFileImpl {
return *file_;
}
- byte* Begin() const {
+ uint8_t* Begin() const {
return map_->Begin();
}
- byte* End() const {
+ uint8_t* End() const {
return map_->End();
}
@@ -71,7 +71,7 @@ class ElfFileImpl {
Elf_Shdr* GetSectionNameStringSection() const;
// Find .dynsym using .hash for more efficient lookup than FindSymbolAddress.
- const byte* FindDynamicSymbolAddress(const std::string& symbol_name) const;
+ const uint8_t* FindDynamicSymbolAddress(const std::string& symbol_name) const;
static bool IsSymbolSectionType(Elf_Word section_type);
Elf_Word GetSymbolNum(Elf_Shdr&) const;
@@ -120,8 +120,8 @@ class ElfFileImpl {
bool SetMap(MemMap* map, std::string* error_msg);
- byte* GetProgramHeadersStart() const;
- byte* GetSectionHeadersStart() const;
+ uint8_t* GetProgramHeadersStart() const;
+ uint8_t* GetSectionHeadersStart() const;
Elf_Phdr& GetDynamicProgramHeader() const;
Elf_Dyn* GetDynamicSectionStart() const;
Elf_Sym* GetSymbolSectionStart(Elf_Word section_type) const;
@@ -137,7 +137,7 @@ class ElfFileImpl {
typedef std::map<std::string, Elf_Sym*> SymbolTable;
SymbolTable** GetSymbolTable(Elf_Word section_type);
- bool ValidPointer(const byte* start) const;
+ bool ValidPointer(const uint8_t* start) const;
const Elf_Sym* FindDynamicSymbol(const std::string& symbol_name) const;
@@ -145,10 +145,10 @@ class ElfFileImpl {
bool CheckSectionsExist(std::string* error_msg) const;
// Check that the link field of the first section points to the second section.
- bool CheckSectionsLinked(const byte* source, const byte* target) const;
+ bool CheckSectionsLinked(const uint8_t* source, const uint8_t* target) const;
// Check whether the offset is in range, and set *target to Begin() + offset if OK.
- bool CheckAndSet(Elf32_Off offset, const char* label, byte** target, std::string* error_msg);
+ bool CheckAndSet(Elf32_Off offset, const char* label, uint8_t** target, std::string* error_msg);
// Find symbol in specified table, returning nullptr if it is not found.
//
@@ -182,13 +182,13 @@ class ElfFileImpl {
// Pointer to start of first PT_LOAD program segment after Load()
// when program_header_only_ is true.
- byte* base_address_;
+ uint8_t* base_address_;
// The program header should always be available, but use GetProgramHeadersStart() to be sure.
- byte* program_headers_start_;
+ uint8_t* program_headers_start_;
// Conditionally available values. Use accessors to ensure they exist if they are required.
- byte* section_headers_start_;
+ uint8_t* section_headers_start_;
Elf_Phdr* dynamic_program_header_;
Elf_Dyn* dynamic_section_start_;
Elf_Sym* symtab_section_start_;
@@ -201,7 +201,7 @@ class ElfFileImpl {
SymbolTable* dynsym_symbol_table_;
// Support for GDB JIT
- byte* jit_elf_image_;
+ uint8_t* jit_elf_image_;
JITCodeEntry* jit_gdb_entry_;
std::unique_ptr<ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel,
diff --git a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
index 7f6144bede..642c94a01e 100644
--- a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
@@ -64,8 +64,8 @@ class PortableArgumentVisitor {
caller_mh_(caller_mh),
args_in_regs_(ComputeArgsInRegs(caller_mh)),
num_params_(caller_mh.NumArgs()),
- reg_args_(reinterpret_cast<byte*>(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET),
- stack_args_(reinterpret_cast<byte*>(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE
+ reg_args_(reinterpret_cast<uint8_t*>(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET),
+ stack_args_(reinterpret_cast<uint8_t*>(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE
+ PORTABLE_STACK_ARG_SKIP),
cur_args_(reg_args_),
cur_arg_index_(0),
@@ -88,8 +88,8 @@ class PortableArgumentVisitor {
return caller_mh_.GetParamPrimitiveType(param_index_);
}
- byte* GetParamAddress() const {
- return cur_args_ + (cur_arg_index_ * kPointerSize);
+ uint8_t* GetParamAddress() const {
+ return cur_args_ + (cur_arg_index_ * sizeof(void*));
}
void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -137,9 +137,9 @@ class PortableArgumentVisitor {
MethodHelper& caller_mh_;
const size_t args_in_regs_;
const size_t num_params_;
- byte* const reg_args_;
- byte* const stack_args_;
- byte* cur_args_;
+ uint8_t* const reg_args_;
+ uint8_t* const stack_args_;
+ uint8_t* cur_args_;
size_t cur_arg_index_;
size_t param_index_;
};
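GetParamAddress above is simple pointer arithmetic over the spilled-argument area: every slot is one native pointer wide, so argument i sits at cur_args_ + i * sizeof(void*). A stand-alone sketch of that calculation, with illustrative names only:

#include <cstddef>
#include <cstdint>

// Illustrative only: address of the i-th spilled argument when every slot
// is one native pointer wide, as in the visitor above.
static uint8_t* ParamAddress(uint8_t* args_base, size_t index) {
  return args_base + index * sizeof(void*);  // 4-byte slots on 32-bit targets, 8 on 64-bit
}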
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index 49df62db47..42ace40637 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -60,7 +60,7 @@ extern "C" TwoWordReturn artInstrumentationMethodExitFromCode(Thread* self,
Runtime* runtime = Runtime::Current();
sp->Assign(runtime->GetCalleeSaveMethod(Runtime::kRefsOnly));
uint32_t return_pc_offset = GetCalleeSavePCOffset(kRuntimeISA, Runtime::kRefsOnly);
- uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp) +
+ uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) +
return_pc_offset);
CHECK_EQ(*return_pc, 0U);
self->SetTopOfStack(sp, 0);
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index f970ef8e85..054dd4698d 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -202,7 +202,7 @@ class QuickArgumentVisitor {
static mirror::ArtMethod* GetCallingMethod(StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
- byte* previous_sp = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
+ uint8_t* previous_sp = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
return reinterpret_cast<StackReference<mirror::ArtMethod>*>(previous_sp)->AsMirrorPtr();
}
@@ -210,16 +210,16 @@ class QuickArgumentVisitor {
static uintptr_t GetCallingPc(StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
- byte* lr = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
+ uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
return *reinterpret_cast<uintptr_t*>(lr);
}
QuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static, const char* shorty,
uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
- gpr_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
- fpr_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
- stack_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
+ gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
+ fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
+ stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
+ StackArgumentStartFromShorty(is_static, shorty, shorty_len)),
gpr_index_(0), fpr_index_(0), stack_index_(0), cur_type_(Primitive::kPrimVoid),
is_split_long_or_double_(false) {}
@@ -232,7 +232,7 @@ class QuickArgumentVisitor {
return cur_type_;
}
- byte* GetParamAddress() const {
+ uint8_t* GetParamAddress() const {
if (!kQuickSoftFloatAbi) {
Primitive::Type type = GetParamPrimitiveType();
if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
@@ -398,9 +398,9 @@ class QuickArgumentVisitor {
const uint32_t shorty_len_;
private:
- byte* const gpr_args_; // Address of GPR arguments in callee save frame.
- byte* const fpr_args_; // Address of FPR arguments in callee save frame.
- byte* const stack_args_; // Address of stack arguments in caller's frame.
+ uint8_t* const gpr_args_; // Address of GPR arguments in callee save frame.
+ uint8_t* const fpr_args_; // Address of FPR arguments in callee save frame.
+ uint8_t* const stack_args_; // Address of stack arguments in caller's frame.
uint32_t gpr_index_; // Index into spilled GPRs.
uint32_t fpr_index_; // Index into spilled FPRs.
uint32_t stack_index_; // Index into arguments on the stack.
@@ -1286,7 +1286,7 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
// We have to squeeze in the HandleScope, and relocate the method pointer.
// "Free" the slot for the method.
- sp8 += kPointerSize; // In the callee-save frame we use a full pointer.
+ sp8 += sizeof(void*); // In the callee-save frame we use a full pointer.
// Under the callee saves put handle scope and new method stack reference.
*handle_scope_entries = num_handle_scope_references_;
@@ -1868,7 +1868,7 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(mirror::ArtMethod* interfa
// Find the caller PC.
constexpr size_t pc_offset = GetCalleeSavePCOffset(kRuntimeISA, Runtime::kRefsAndArgs);
- uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp) + pc_offset);
+ uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) + pc_offset);
// Map the caller PC to a dex PC.
uint32_t dex_pc = caller_method->ToDexPc(caller_pc);
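GetCallingMethod and GetCallingPc both treat the callee-save frame as raw bytes and read a word at a fixed byte offset from sp. A minimal sketch of that pattern follows; the offset is a placeholder parameter, whereas the real values come from the kQuickCalleeSaveFrame_RefAndArgs_* constants computed per ISA.

#include <cstddef>
#include <cstdint>

// Sketch: read a word stored at a known byte offset from a frame pointer.
static uintptr_t ReadWordAtOffset(void* frame, size_t byte_offset) {
  uint8_t* raw = reinterpret_cast<uint8_t*>(frame);
  return *reinterpret_cast<uintptr_t*>(raw + byte_offset);
}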
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index 66ee218bae..02b8a5b605 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -95,7 +95,7 @@ TEST_F(QuickTrampolineEntrypointsTest, PointerSize) {
TEST_F(QuickTrampolineEntrypointsTest, ReturnPC) {
// Ensure that the computation in callee_save_frame.h is correct.
// Note: we can only check against the kRuntimeISA, because the ArtMethod computation uses
- // kPointerSize, which is wrong when the target bitwidth is not the same as the host's.
+ // sizeof(void*), which is wrong when the target bitwidth is not the same as the host's.
CheckPCOffset(kRuntimeISA, Runtime::kRefsAndArgs,
GetCalleeSavePCOffset(kRuntimeISA, Runtime::kRefsAndArgs));
CheckPCOffset(kRuntimeISA, Runtime::kRefsOnly,
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 305e5a2c9d..cfd2a3d0d0 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -86,201 +86,201 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
// TODO: Better connection. Take alignment into account.
EXPECT_OFFSET_DIFF_GT3(Thread, tls64_.stats, tlsPtr_.card_table, 8, thread_tls64_to_tlsptr);
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, card_table, exception, kPointerSize);
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, exception, stack_end, kPointerSize);
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stack_end, managed_stack, kPointerSize);
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, card_table, exception, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, exception, stack_end, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stack_end, managed_stack, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, managed_stack, suspend_trigger, sizeof(ManagedStack));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, suspend_trigger, jni_env, kPointerSize);
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, jni_env, self, kPointerSize);
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, self, opeer, kPointerSize);
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, opeer, jpeer, kPointerSize);
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, jpeer, stack_begin, kPointerSize);
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stack_begin, stack_size, kPointerSize);
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stack_size, throw_location, kPointerSize);
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, suspend_trigger, jni_env, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, jni_env, self, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, self, opeer, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, opeer, jpeer, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, jpeer, stack_begin, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stack_begin, stack_size, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stack_size, throw_location, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, throw_location, stack_trace_sample, sizeof(ThrowLocation));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stack_trace_sample, wait_next, kPointerSize);
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, wait_next, monitor_enter_object, kPointerSize);
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, monitor_enter_object, top_handle_scope, kPointerSize);
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, top_handle_scope, class_loader_override, kPointerSize);
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, class_loader_override, long_jump_context, kPointerSize);
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, long_jump_context, instrumentation_stack, kPointerSize);
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, instrumentation_stack, debug_invoke_req, kPointerSize);
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, debug_invoke_req, single_step_control, kPointerSize);
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stack_trace_sample, wait_next, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, wait_next, monitor_enter_object, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, monitor_enter_object, top_handle_scope, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, top_handle_scope, class_loader_override, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, class_loader_override, long_jump_context, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, long_jump_context, instrumentation_stack, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, instrumentation_stack, debug_invoke_req, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, debug_invoke_req, single_step_control, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, single_step_control, deoptimization_shadow_frame,
- kPointerSize);
+ sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, deoptimization_shadow_frame,
- shadow_frame_under_construction, kPointerSize);
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, shadow_frame_under_construction, name, kPointerSize);
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, name, pthread_self, kPointerSize);
+ shadow_frame_under_construction, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, shadow_frame_under_construction, name, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, name, pthread_self, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, pthread_self, last_no_thread_suspension_cause,
- kPointerSize);
+ sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, last_no_thread_suspension_cause, checkpoint_functions,
- kPointerSize);
+ sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, checkpoint_functions, interpreter_entrypoints,
- kPointerSize * 3);
+ sizeof(void*) * 3);
// Skip across the entrypoints structures.
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_start, thread_local_pos, kPointerSize);
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_pos, thread_local_end, kPointerSize);
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_end, thread_local_objects, kPointerSize);
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, rosalloc_runs, kPointerSize);
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_start, thread_local_pos, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_pos, thread_local_end, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_end, thread_local_objects, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, rosalloc_runs, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, rosalloc_runs, thread_local_alloc_stack_top,
- kPointerSize * kNumRosAllocThreadLocalSizeBrackets);
+ sizeof(void*) * kNumRosAllocThreadLocalSizeBrackets);
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_alloc_stack_top, thread_local_alloc_stack_end,
- kPointerSize);
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_alloc_stack_end, held_mutexes, kPointerSize);
+ sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_alloc_stack_end, held_mutexes, sizeof(void*));
EXPECT_OFFSET_DIFF(Thread, tlsPtr_.held_mutexes, Thread, wait_mutex_,
- kPointerSize * kLockLevelCount + kPointerSize, thread_tlsptr_end);
+ sizeof(void*) * kLockLevelCount + sizeof(void*), thread_tlsptr_end);
}
void CheckInterpreterEntryPoints() {
CHECKED(OFFSETOF_MEMBER(InterpreterEntryPoints, pInterpreterToInterpreterBridge) == 0,
InterpreterEntryPoints_start_with_i2i);
EXPECT_OFFSET_DIFFNP(InterpreterEntryPoints, pInterpreterToInterpreterBridge,
- pInterpreterToCompiledCodeBridge, kPointerSize);
+ pInterpreterToCompiledCodeBridge, sizeof(void*));
CHECKED(OFFSETOF_MEMBER(InterpreterEntryPoints, pInterpreterToCompiledCodeBridge)
- + kPointerSize == sizeof(InterpreterEntryPoints), InterpreterEntryPoints_all);
+ + sizeof(void*) == sizeof(InterpreterEntryPoints), InterpreterEntryPoints_all);
}
void CheckJniEntryPoints() {
CHECKED(OFFSETOF_MEMBER(JniEntryPoints, pDlsymLookup) == 0,
JniEntryPoints_start_with_dlsymlookup);
CHECKED(OFFSETOF_MEMBER(JniEntryPoints, pDlsymLookup)
- + kPointerSize == sizeof(JniEntryPoints), JniEntryPoints_all);
+ + sizeof(void*) == sizeof(JniEntryPoints), JniEntryPoints_all);
}
void CheckPortableEntryPoints() {
CHECKED(OFFSETOF_MEMBER(PortableEntryPoints, pPortableImtConflictTrampoline) == 0,
PortableEntryPoints_start_with_imt);
EXPECT_OFFSET_DIFFNP(PortableEntryPoints, pPortableImtConflictTrampoline,
- pPortableResolutionTrampoline, kPointerSize);
+ pPortableResolutionTrampoline, sizeof(void*));
EXPECT_OFFSET_DIFFNP(PortableEntryPoints, pPortableResolutionTrampoline,
- pPortableToInterpreterBridge, kPointerSize);
+ pPortableToInterpreterBridge, sizeof(void*));
CHECKED(OFFSETOF_MEMBER(PortableEntryPoints, pPortableToInterpreterBridge)
- + kPointerSize == sizeof(PortableEntryPoints), PortableEntryPoints_all);
+ + sizeof(void*) == sizeof(PortableEntryPoints), PortableEntryPoints_all);
}
void CheckQuickEntryPoints() {
CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pAllocArray) == 0,
QuickEntryPoints_start_with_allocarray);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArray, pAllocArrayResolved, kPointerSize);
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArray, pAllocArrayResolved, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArrayResolved, pAllocArrayWithAccessCheck,
- kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArrayWithAccessCheck, pAllocObject, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObject, pAllocObjectResolved, kPointerSize);
+ sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArrayWithAccessCheck, pAllocObject, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObject, pAllocObjectResolved, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectResolved, pAllocObjectInitialized,
- kPointerSize);
+ sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectInitialized, pAllocObjectWithAccessCheck,
- kPointerSize);
+ sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectWithAccessCheck, pCheckAndAllocArray,
- kPointerSize);
+ sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCheckAndAllocArray, pCheckAndAllocArrayWithAccessCheck,
- kPointerSize);
+ sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCheckAndAllocArrayWithAccessCheck,
- pInstanceofNonTrivial, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInstanceofNonTrivial, pCheckCast, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCheckCast, pInitializeStaticStorage, kPointerSize);
+ pInstanceofNonTrivial, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInstanceofNonTrivial, pCheckCast, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCheckCast, pInitializeStaticStorage, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeStaticStorage, pInitializeTypeAndVerifyAccess,
- kPointerSize);
+ sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeTypeAndVerifyAccess, pInitializeType,
- kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeType, pResolveString, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pResolveString, pSet8Instance, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet8Instance, pSet8Static, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet8Static, pSet16Instance, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet16Instance, pSet16Static, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet16Static, pSet32Instance, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet32Instance, pSet32Static, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet32Static, pSet64Instance, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet64Instance, pSet64Static, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet64Static, pSetObjInstance, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSetObjInstance, pSetObjStatic, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSetObjStatic, pGetByteInstance, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetByteInstance, pGetBooleanInstance, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetBooleanInstance, pGetByteStatic, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetByteStatic, pGetBooleanStatic, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetBooleanStatic, pGetShortInstance, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetShortInstance, pGetCharInstance, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetCharInstance, pGetShortStatic, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetShortStatic, pGetCharStatic, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetCharStatic, pGet32Instance, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGet32Instance, pGet32Static, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGet32Static, pGet64Instance, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGet64Instance, pGet64Static, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGet64Static, pGetObjInstance, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetObjInstance, pGetObjStatic, kPointerSize);
+ sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeType, pResolveString, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pResolveString, pSet8Instance, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet8Instance, pSet8Static, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet8Static, pSet16Instance, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet16Instance, pSet16Static, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet16Static, pSet32Instance, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet32Instance, pSet32Static, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet32Static, pSet64Instance, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet64Instance, pSet64Static, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet64Static, pSetObjInstance, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSetObjInstance, pSetObjStatic, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSetObjStatic, pGetByteInstance, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetByteInstance, pGetBooleanInstance, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetBooleanInstance, pGetByteStatic, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetByteStatic, pGetBooleanStatic, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetBooleanStatic, pGetShortInstance, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetShortInstance, pGetCharInstance, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetCharInstance, pGetShortStatic, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetShortStatic, pGetCharStatic, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetCharStatic, pGet32Instance, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGet32Instance, pGet32Static, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGet32Static, pGet64Instance, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGet64Instance, pGet64Static, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGet64Static, pGetObjInstance, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetObjInstance, pGetObjStatic, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetObjStatic, pAputObjectWithNullAndBoundCheck,
- kPointerSize);
+ sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAputObjectWithNullAndBoundCheck,
- pAputObjectWithBoundCheck, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAputObjectWithBoundCheck, pAputObject, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAputObject, pHandleFillArrayData, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pHandleFillArrayData, pJniMethodStart, kPointerSize);
+ pAputObjectWithBoundCheck, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAputObjectWithBoundCheck, pAputObject, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAputObject, pHandleFillArrayData, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pHandleFillArrayData, pJniMethodStart, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pJniMethodStart, pJniMethodStartSynchronized,
- kPointerSize);
+ sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pJniMethodStartSynchronized, pJniMethodEnd,
- kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pJniMethodEnd, pJniMethodEndSynchronized, kPointerSize);
+ sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pJniMethodEnd, pJniMethodEndSynchronized, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pJniMethodEndSynchronized, pJniMethodEndWithReference,
- kPointerSize);
+ sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pJniMethodEndWithReference,
- pJniMethodEndWithReferenceSynchronized, kPointerSize);
+ pJniMethodEndWithReferenceSynchronized, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pJniMethodEndWithReferenceSynchronized,
- pQuickGenericJniTrampoline, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pQuickGenericJniTrampoline, pLockObject, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pLockObject, pUnlockObject, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pUnlockObject, pCmpgDouble, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCmpgDouble, pCmpgFloat, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCmpgFloat, pCmplDouble, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCmplDouble, pCmplFloat, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCmplFloat, pFmod, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pFmod, pL2d, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pL2d, pFmodf, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pFmodf, pL2f, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pL2f, pD2iz, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pD2iz, pF2iz, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pF2iz, pIdivmod, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pIdivmod, pD2l, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pD2l, pF2l, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pF2l, pLdiv, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pLdiv, pLmod, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pLmod, pLmul, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pLmul, pShlLong, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pShlLong, pShrLong, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pShrLong, pUshrLong, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pUshrLong, pIndexOf, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pIndexOf, pStringCompareTo, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pStringCompareTo, pMemcpy, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pMemcpy, pQuickImtConflictTrampoline, kPointerSize);
+ pQuickGenericJniTrampoline, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pQuickGenericJniTrampoline, pLockObject, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pLockObject, pUnlockObject, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pUnlockObject, pCmpgDouble, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCmpgDouble, pCmpgFloat, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCmpgFloat, pCmplDouble, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCmplDouble, pCmplFloat, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCmplFloat, pFmod, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pFmod, pL2d, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pL2d, pFmodf, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pFmodf, pL2f, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pL2f, pD2iz, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pD2iz, pF2iz, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pF2iz, pIdivmod, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pIdivmod, pD2l, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pD2l, pF2l, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pF2l, pLdiv, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pLdiv, pLmod, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pLmod, pLmul, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pLmul, pShlLong, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pShlLong, pShrLong, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pShrLong, pUshrLong, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pUshrLong, pIndexOf, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pIndexOf, pStringCompareTo, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pStringCompareTo, pMemcpy, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pMemcpy, pQuickImtConflictTrampoline, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pQuickImtConflictTrampoline, pQuickResolutionTrampoline,
- kPointerSize);
+ sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pQuickResolutionTrampoline, pQuickToInterpreterBridge,
- kPointerSize);
+ sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pQuickToInterpreterBridge,
- pInvokeDirectTrampolineWithAccessCheck, kPointerSize);
+ pInvokeDirectTrampolineWithAccessCheck, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInvokeDirectTrampolineWithAccessCheck,
- pInvokeInterfaceTrampolineWithAccessCheck, kPointerSize);
+ pInvokeInterfaceTrampolineWithAccessCheck, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInvokeInterfaceTrampolineWithAccessCheck,
- pInvokeStaticTrampolineWithAccessCheck, kPointerSize);
+ pInvokeStaticTrampolineWithAccessCheck, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInvokeStaticTrampolineWithAccessCheck,
- pInvokeSuperTrampolineWithAccessCheck, kPointerSize);
+ pInvokeSuperTrampolineWithAccessCheck, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInvokeSuperTrampolineWithAccessCheck,
- pInvokeVirtualTrampolineWithAccessCheck, kPointerSize);
+ pInvokeVirtualTrampolineWithAccessCheck, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInvokeVirtualTrampolineWithAccessCheck,
- pTestSuspend, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pTestSuspend, pDeliverException, kPointerSize);
+ pTestSuspend, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pTestSuspend, pDeliverException, sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pDeliverException, pThrowArrayBounds, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowArrayBounds, pThrowDivZero, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowDivZero, pThrowNoSuchMethod, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowNoSuchMethod, pThrowNullPointer, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowNullPointer, pThrowStackOverflow, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowStackOverflow, pA64Load, kPointerSize);
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pA64Load, pA64Store, kPointerSize);
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pDeliverException, pThrowArrayBounds, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowArrayBounds, pThrowDivZero, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowDivZero, pThrowNoSuchMethod, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowNoSuchMethod, pThrowNullPointer, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowNullPointer, pThrowStackOverflow, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowStackOverflow, pA64Load, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pA64Load, pA64Store, sizeof(void*));
CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pA64Store)
- + kPointerSize == sizeof(QuickEntryPoints), QuickEntryPoints_all);
+ + sizeof(void*) == sizeof(QuickEntryPoints), QuickEntryPoints_all);
}
};
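Every check above asserts the same invariant: consecutive pointer-sized members are exactly sizeof(void*) apart. Reduced to its essence, such a check looks roughly like the sketch below; the real EXPECT_OFFSET_DIFFP macro also copes with the nested tlsPtr_ struct and reports a named failure instead of using static_assert.

#include <cstddef>

// Illustrative reduction of an offset-difference check: the distance between
// two members must equal the expected byte count (here, one native pointer).
struct Example {
  void* first;
  void* second;
};
static_assert(offsetof(Example, second) - offsetof(Example, first) == sizeof(void*),
              "consecutive pointer members must be exactly one pointer apart");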
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 6033a5fc5d..3a17ecaa57 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -77,7 +77,7 @@ class ExceptionTest : public CommonRuntimeTest {
uint32_t vmap_table_offset = sizeof(OatQuickMethodHeader) + fake_vmap_table_data.size();
uint32_t mapping_table_offset = vmap_table_offset + fake_mapping_data.size();
OatQuickMethodHeader method_header(mapping_table_offset, vmap_table_offset,
- 4 * kPointerSize, 0u, 0u, code_size);
+ 4 * sizeof(void*), 0u, 0u, code_size);
fake_header_code_and_maps_.resize(sizeof(method_header));
memcpy(&fake_header_code_and_maps_[0], &method_header, sizeof(method_header));
fake_header_code_and_maps_.insert(fake_header_code_and_maps_.begin(),
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 2c72ba13ec..929a1d2ee2 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -213,7 +213,7 @@ class AtomicStack {
mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), NULL, capacity_ * sizeof(T),
PROT_READ | PROT_WRITE, false, &error_msg));
CHECK(mem_map_.get() != NULL) << "couldn't allocate mark stack.\n" << error_msg;
- byte* addr = mem_map_->Begin();
+ uint8_t* addr = mem_map_->Begin();
CHECK(addr != NULL);
debug_is_sorted_ = true;
begin_ = reinterpret_cast<T*>(addr);
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index 3b06f74d72..15562e506e 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -27,9 +27,9 @@ namespace art {
namespace gc {
namespace accounting {
-static inline bool byte_cas(byte old_value, byte new_value, byte* address) {
+static inline bool byte_cas(uint8_t old_value, uint8_t new_value, uint8_t* address) {
#if defined(__i386__) || defined(__x86_64__)
- Atomic<byte>* byte_atomic = reinterpret_cast<Atomic<byte>*>(address);
+ Atomic<uint8_t>* byte_atomic = reinterpret_cast<Atomic<uint8_t>*>(address);
return byte_atomic->CompareExchangeWeakRelaxed(old_value, new_value);
#else
// Little endian means the least significant byte sits at the lowest address.
@@ -49,19 +49,19 @@ static inline bool byte_cas(byte old_value, byte new_value, byte* address) {
}
template <typename Visitor>
-inline size_t CardTable::Scan(ContinuousSpaceBitmap* bitmap, byte* scan_begin, byte* scan_end,
- const Visitor& visitor, const byte minimum_age) const {
- DCHECK_GE(scan_begin, reinterpret_cast<byte*>(bitmap->HeapBegin()));
+inline size_t CardTable::Scan(ContinuousSpaceBitmap* bitmap, uint8_t* scan_begin, uint8_t* scan_end,
+ const Visitor& visitor, const uint8_t minimum_age) const {
+ DCHECK_GE(scan_begin, reinterpret_cast<uint8_t*>(bitmap->HeapBegin()));
// scan_end is the byte after the last byte we scan.
- DCHECK_LE(scan_end, reinterpret_cast<byte*>(bitmap->HeapLimit()));
- byte* card_cur = CardFromAddr(scan_begin);
- byte* card_end = CardFromAddr(AlignUp(scan_end, kCardSize));
+ DCHECK_LE(scan_end, reinterpret_cast<uint8_t*>(bitmap->HeapLimit()));
+ uint8_t* card_cur = CardFromAddr(scan_begin);
+ uint8_t* card_end = CardFromAddr(AlignUp(scan_end, kCardSize));
CheckCardValid(card_cur);
CheckCardValid(card_end);
size_t cards_scanned = 0;
// Handle any unaligned cards at the start.
- while (!IsAligned<sizeof(word)>(card_cur) && card_cur < card_end) {
+ while (!IsAligned<sizeof(intptr_t)>(card_cur) && card_cur < card_end) {
if (*card_cur >= minimum_age) {
uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card_cur));
bitmap->VisitMarkedRange(start, start + kCardSize, visitor);
@@ -70,7 +70,7 @@ inline size_t CardTable::Scan(ContinuousSpaceBitmap* bitmap, byte* scan_begin, b
++card_cur;
}
- byte* aligned_end = card_end -
+ uint8_t* aligned_end = card_end -
(reinterpret_cast<uintptr_t>(card_end) & (sizeof(uintptr_t) - 1));
uintptr_t* word_end = reinterpret_cast<uintptr_t*>(aligned_end);
@@ -85,14 +85,14 @@ inline size_t CardTable::Scan(ContinuousSpaceBitmap* bitmap, byte* scan_begin, b
// Find the first dirty card.
uintptr_t start_word = *word_cur;
- uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(reinterpret_cast<byte*>(word_cur)));
+ uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(reinterpret_cast<uint8_t*>(word_cur)));
// TODO: Investigate if processing continuous runs of dirty cards with a single bitmap visit is
// more efficient.
for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
- if (static_cast<byte>(start_word) >= minimum_age) {
- auto* card = reinterpret_cast<byte*>(word_cur) + i;
- DCHECK(*card == static_cast<byte>(start_word) || *card == kCardDirty)
- << "card " << static_cast<size_t>(*card) << " word " << (start_word & 0xFF);
+ if (static_cast<uint8_t>(start_word) >= minimum_age) {
+ auto* card = reinterpret_cast<uint8_t*>(word_cur) + i;
+ DCHECK(*card == static_cast<uint8_t>(start_word) || *card == kCardDirty)
+ << "card " << static_cast<size_t>(*card) << " intptr_t " << (start_word & 0xFF);
bitmap->VisitMarkedRange(start, start + kCardSize, visitor);
++cards_scanned;
}
@@ -103,7 +103,7 @@ inline size_t CardTable::Scan(ContinuousSpaceBitmap* bitmap, byte* scan_begin, b
exit_for:
// Handle any unaligned cards at the end.
- card_cur = reinterpret_cast<byte*>(word_end);
+ card_cur = reinterpret_cast<uint8_t*>(word_end);
while (card_cur < card_end) {
if (*card_cur >= minimum_age) {
uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card_cur));
@@ -125,16 +125,16 @@ inline size_t CardTable::Scan(ContinuousSpaceBitmap* bitmap, byte* scan_begin, b
* us to know which cards got cleared.
*/
template <typename Visitor, typename ModifiedVisitor>
-inline void CardTable::ModifyCardsAtomic(byte* scan_begin, byte* scan_end, const Visitor& visitor,
+inline void CardTable::ModifyCardsAtomic(uint8_t* scan_begin, uint8_t* scan_end, const Visitor& visitor,
const ModifiedVisitor& modified) {
- byte* card_cur = CardFromAddr(scan_begin);
- byte* card_end = CardFromAddr(AlignUp(scan_end, kCardSize));
+ uint8_t* card_cur = CardFromAddr(scan_begin);
+ uint8_t* card_end = CardFromAddr(AlignUp(scan_end, kCardSize));
CheckCardValid(card_cur);
CheckCardValid(card_end);
// Handle any unaligned cards at the start.
- while (!IsAligned<sizeof(word)>(card_cur) && card_cur < card_end) {
- byte expected, new_value;
+ while (!IsAligned<sizeof(intptr_t)>(card_cur) && card_cur < card_end) {
+ uint8_t expected, new_value;
do {
expected = *card_cur;
new_value = visitor(expected);
@@ -146,9 +146,9 @@ inline void CardTable::ModifyCardsAtomic(byte* scan_begin, byte* scan_end, const
}
// Handle unaligned cards at the end.
- while (!IsAligned<sizeof(word)>(card_end) && card_end > card_cur) {
+ while (!IsAligned<sizeof(intptr_t)>(card_end) && card_end > card_cur) {
--card_end;
- byte expected, new_value;
+ uint8_t expected, new_value;
do {
expected = *card_end;
new_value = visitor(expected);
@@ -184,10 +184,10 @@ inline void CardTable::ModifyCardsAtomic(byte* scan_begin, byte* scan_end, const
Atomic<uintptr_t>* atomic_word = reinterpret_cast<Atomic<uintptr_t>*>(word_cur);
if (LIKELY(atomic_word->CompareExchangeWeakRelaxed(expected_word, new_word))) {
for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
- const byte expected_byte = expected_bytes[i];
- const byte new_byte = new_bytes[i];
+ const uint8_t expected_byte = expected_bytes[i];
+ const uint8_t new_byte = new_bytes[i];
if (expected_byte != new_byte) {
- modified(reinterpret_cast<byte*>(word_cur) + i, expected_byte, new_byte);
+ modified(reinterpret_cast<uint8_t*>(word_cur) + i, expected_byte, new_byte);
}
}
break;
@@ -197,7 +197,7 @@ inline void CardTable::ModifyCardsAtomic(byte* scan_begin, byte* scan_end, const
}
}
-inline void* CardTable::AddrFromCard(const byte *card_addr) const {
+inline void* CardTable::AddrFromCard(const uint8_t *card_addr) const {
DCHECK(IsValidCard(card_addr))
<< " card_addr: " << reinterpret_cast<const void*>(card_addr)
<< " begin: " << reinterpret_cast<void*>(mem_map_->Begin() + offset_)
@@ -206,15 +206,15 @@ inline void* CardTable::AddrFromCard(const byte *card_addr) const {
return reinterpret_cast<void*>(offset << kCardShift);
}
-inline byte* CardTable::CardFromAddr(const void *addr) const {
- byte *card_addr = biased_begin_ + (reinterpret_cast<uintptr_t>(addr) >> kCardShift);
+inline uint8_t* CardTable::CardFromAddr(const void *addr) const {
+ uint8_t *card_addr = biased_begin_ + (reinterpret_cast<uintptr_t>(addr) >> kCardShift);
// Sanity check that the caller was asking for an address covered by the card table
DCHECK(IsValidCard(card_addr)) << "addr: " << addr
<< " card_addr: " << reinterpret_cast<void*>(card_addr);
return card_addr;
}
-inline void CardTable::CheckCardValid(byte* card) const {
+inline void CardTable::CheckCardValid(uint8_t* card) const {
DCHECK(IsValidCard(card))
<< " card_addr: " << reinterpret_cast<const void*>(card)
<< " begin: " << reinterpret_cast<void*>(mem_map_->Begin() + offset_)
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 049855000b..9a6f2b20be 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -55,7 +55,7 @@ constexpr uint8_t CardTable::kCardDirty;
* byte is equal to GC_DIRTY_CARD. See CardTable::Create for details.
*/
-CardTable* CardTable::Create(const byte* heap_begin, size_t heap_capacity) {
+CardTable* CardTable::Create(const uint8_t* heap_begin, size_t heap_capacity) {
/* Set up the card table */
size_t capacity = heap_capacity / kCardSize;
/* Allocate an extra 256 bytes to allow fixed low-byte of base */
@@ -68,13 +68,13 @@ CardTable* CardTable::Create(const byte* heap_begin, size_t heap_capacity) {
// don't clear the card table to avoid unnecessary pages being allocated
COMPILE_ASSERT(kCardClean == 0, card_clean_must_be_0);
- byte* cardtable_begin = mem_map->Begin();
+ uint8_t* cardtable_begin = mem_map->Begin();
CHECK(cardtable_begin != NULL);
// We allocated up to a byte's worth of extra space to allow biased_begin's byte value to equal
// kCardDirty; compute an offset value to make this the case
size_t offset = 0;
- byte* biased_begin = reinterpret_cast<byte*>(reinterpret_cast<uintptr_t>(cardtable_begin) -
+ uint8_t* biased_begin = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(cardtable_begin) -
(reinterpret_cast<uintptr_t>(heap_begin) >> kCardShift));
uintptr_t biased_byte = reinterpret_cast<uintptr_t>(biased_begin) & 0xff;
if (biased_byte != kCardDirty) {
@@ -86,14 +86,14 @@ CardTable* CardTable::Create(const byte* heap_begin, size_t heap_capacity) {
return new CardTable(mem_map.release(), biased_begin, offset);
}
-CardTable::CardTable(MemMap* mem_map, byte* biased_begin, size_t offset)
+CardTable::CardTable(MemMap* mem_map, uint8_t* biased_begin, size_t offset)
: mem_map_(mem_map), biased_begin_(biased_begin), offset_(offset) {
}
void CardTable::ClearSpaceCards(space::ContinuousSpace* space) {
// TODO: clear just the range of the table that has been modified
- byte* card_start = CardFromAddr(space->Begin());
- byte* card_end = CardFromAddr(space->End()); // Make sure to round up.
+ uint8_t* card_start = CardFromAddr(space->Begin());
+ uint8_t* card_end = CardFromAddr(space->End()); // Make sure to round up.
memset(reinterpret_cast<void*>(card_start), kCardClean, card_end - card_start);
}
@@ -106,10 +106,10 @@ bool CardTable::AddrIsInCardTable(const void* addr) const {
return IsValidCard(biased_begin_ + ((uintptr_t)addr >> kCardShift));
}
-void CardTable::CheckAddrIsInCardTable(const byte* addr) const {
- byte* card_addr = biased_begin_ + ((uintptr_t)addr >> kCardShift);
- byte* begin = mem_map_->Begin() + offset_;
- byte* end = mem_map_->End();
+void CardTable::CheckAddrIsInCardTable(const uint8_t* addr) const {
+ uint8_t* card_addr = biased_begin_ + ((uintptr_t)addr >> kCardShift);
+ uint8_t* begin = mem_map_->Begin() + offset_;
+ uint8_t* end = mem_map_->End();
CHECK(AddrIsInCardTable(addr))
<< "Card table " << this
<< " begin: " << reinterpret_cast<void*>(begin)
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index fbeea85554..e1343c8510 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -51,11 +51,11 @@ class CardTable {
static constexpr uint8_t kCardClean = 0x0;
static constexpr uint8_t kCardDirty = 0x70;
- static CardTable* Create(const byte* heap_begin, size_t heap_capacity);
+ static CardTable* Create(const uint8_t* heap_begin, size_t heap_capacity);
// Set the card associated with the given address to GC_CARD_DIRTY.
void MarkCard(const void *addr) {
- byte* card_addr = CardFromAddr(addr);
+ uint8_t* card_addr = CardFromAddr(addr);
*card_addr = kCardDirty;
}
@@ -65,16 +65,16 @@ class CardTable {
}
// Return the state of the card at an address.
- byte GetCard(const mirror::Object* obj) const {
+ uint8_t GetCard(const mirror::Object* obj) const {
return *CardFromAddr(obj);
}
// Visit and clear cards within memory range, only visits dirty cards.
template <typename Visitor>
void VisitClear(const void* start, const void* end, const Visitor& visitor) {
- byte* card_start = CardFromAddr(start);
- byte* card_end = CardFromAddr(end);
- for (byte* it = card_start; it != card_end; ++it) {
+ uint8_t* card_start = CardFromAddr(start);
+ uint8_t* card_end = CardFromAddr(end);
+ for (uint8_t* it = card_start; it != card_end; ++it) {
if (*it == kCardDirty) {
*it = kCardClean;
visitor(it);
@@ -84,7 +84,7 @@ class CardTable {
// Returns a value that, when added to a heap address >> GC_CARD_SHIFT, will address the
// appropriate card table byte. For convenience this value is cached in every Thread.
- byte* GetBiasedBegin() const {
+ uint8_t* GetBiasedBegin() const {
return biased_begin_;
}
@@ -97,20 +97,20 @@ class CardTable {
* us to know which cards got cleared.
*/
template <typename Visitor, typename ModifiedVisitor>
- void ModifyCardsAtomic(byte* scan_begin, byte* scan_end, const Visitor& visitor,
+ void ModifyCardsAtomic(uint8_t* scan_begin, uint8_t* scan_end, const Visitor& visitor,
const ModifiedVisitor& modified);
// For every dirty card of at least minimum age between begin and end, invoke the visitor with the
// specified argument. Returns how many cards the visitor was run on.
template <typename Visitor>
- size_t Scan(SpaceBitmap<kObjectAlignment>* bitmap, byte* scan_begin, byte* scan_end,
+ size_t Scan(SpaceBitmap<kObjectAlignment>* bitmap, uint8_t* scan_begin, uint8_t* scan_end,
const Visitor& visitor,
- const byte minimum_age = kCardDirty) const
+ const uint8_t minimum_age = kCardDirty) const
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Assertion used to check the given address is covered by the card table
- void CheckAddrIsInCardTable(const byte* addr) const;
+ void CheckAddrIsInCardTable(const uint8_t* addr) const;
// Resets all of the bytes in the card table to clean.
void ClearCardTable();
@@ -119,24 +119,24 @@ class CardTable {
void ClearSpaceCards(space::ContinuousSpace* space);
// Returns the first address in the heap which maps to this card.
- void* AddrFromCard(const byte *card_addr) const ALWAYS_INLINE;
+ void* AddrFromCard(const uint8_t *card_addr) const ALWAYS_INLINE;
// Returns the address of the relevant byte in the card table, given an address on the heap.
- byte* CardFromAddr(const void *addr) const ALWAYS_INLINE;
+ uint8_t* CardFromAddr(const void *addr) const ALWAYS_INLINE;
bool AddrIsInCardTable(const void* addr) const;
private:
- CardTable(MemMap* begin, byte* biased_begin, size_t offset);
+ CardTable(MemMap* begin, uint8_t* biased_begin, size_t offset);
// Returns true iff the card table address is within the bounds of the card table.
- bool IsValidCard(const byte* card_addr) const {
- byte* begin = mem_map_->Begin() + offset_;
- byte* end = mem_map_->End();
+ bool IsValidCard(const uint8_t* card_addr) const {
+ uint8_t* begin = mem_map_->Begin() + offset_;
+ uint8_t* end = mem_map_->End();
return card_addr >= begin && card_addr < end;
}
- void CheckCardValid(byte* card) const ALWAYS_INLINE;
+ void CheckCardValid(uint8_t* card) const ALWAYS_INLINE;
// Verifies that all gray objects are on a dirty card.
void VerifyCardTable();
@@ -144,7 +144,7 @@ class CardTable {
// Mmapped pages for the card table
std::unique_ptr<MemMap> mem_map_;
// Value used to compute card table addresses from object addresses, see GetBiasedBegin
- byte* const biased_begin_;
+ uint8_t* const biased_begin_;
// Card table doesn't begin at the beginning of the mem_map_; instead it is displaced by offset
// to allow the byte value of biased_begin_ to equal GC_CARD_DIRTY.
const size_t offset_;
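In the collector, MarkCard is the write-barrier hook: after a reference is stored into an object, the card covering that object is dirtied so a later Scan revisits only those cards (in ART this is reached via a heap-level write-barrier helper rather than being called directly by compiled code). A hedged usage sketch, with illustrative function and variable names; it assumes ART's internal header is available.

#include "gc/accounting/card_table.h"  // ART-internal header, assumed available

// Dirty the card covering 'holder_object' after a reference store into it,
// so the next card scan re-examines that object.
void StoreReferenceWithBarrier(art::gc::accounting::CardTable* card_table,
                               void* holder_object) {
  // ...perform the actual reference store into a field of *holder_object here...
  card_table->MarkCard(holder_object);  // MarkCard takes the object address, not the card
}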
diff --git a/runtime/gc/accounting/card_table_test.cc b/runtime/gc/accounting/card_table_test.cc
index 433855a755..819cb852fa 100644
--- a/runtime/gc/accounting/card_table_test.cc
+++ b/runtime/gc/accounting/card_table_test.cc
@@ -49,45 +49,45 @@ class CardTableTest : public CommonRuntimeTest {
}
}
// Default values for the test, not random to avoid non-deterministic behaviour.
- CardTableTest() : heap_begin_(reinterpret_cast<byte*>(0x2000000)), heap_size_(2 * MB) {
+ CardTableTest() : heap_begin_(reinterpret_cast<uint8_t*>(0x2000000)), heap_size_(2 * MB) {
}
void ClearCardTable() {
card_table_->ClearCardTable();
}
- byte* HeapBegin() const {
+ uint8_t* HeapBegin() const {
return heap_begin_;
}
- byte* HeapLimit() const {
+ uint8_t* HeapLimit() const {
return HeapBegin() + heap_size_;
}
// Return a pseudo random card for an address.
- byte PseudoRandomCard(const byte* addr) const {
+ uint8_t PseudoRandomCard(const uint8_t* addr) const {
size_t offset = RoundDown(addr - heap_begin_, CardTable::kCardSize);
return 1 + offset % 254;
}
void FillRandom() {
- for (const byte* addr = HeapBegin(); addr != HeapLimit(); addr += CardTable::kCardSize) {
+ for (const uint8_t* addr = HeapBegin(); addr != HeapLimit(); addr += CardTable::kCardSize) {
EXPECT_TRUE(card_table_->AddrIsInCardTable(addr));
- byte* card = card_table_->CardFromAddr(addr);
+ uint8_t* card = card_table_->CardFromAddr(addr);
*card = PseudoRandomCard(addr);
}
}
private:
- byte* const heap_begin_;
+ uint8_t* const heap_begin_;
const size_t heap_size_;
};
TEST_F(CardTableTest, TestMarkCard) {
CommonSetup();
- for (const byte* addr = HeapBegin(); addr < HeapLimit(); addr += kObjectAlignment) {
+ for (const uint8_t* addr = HeapBegin(); addr < HeapLimit(); addr += kObjectAlignment) {
auto obj = reinterpret_cast<const mirror::Object*>(addr);
EXPECT_EQ(card_table_->GetCard(obj), CardTable::kCardClean);
EXPECT_TRUE(!card_table_->IsDirty(obj));
card_table_->MarkCard(addr);
EXPECT_TRUE(card_table_->IsDirty(obj));
EXPECT_EQ(card_table_->GetCard(obj), CardTable::kCardDirty);
- byte* card_addr = card_table_->CardFromAddr(addr);
+ uint8_t* card_addr = card_table_->CardFromAddr(addr);
EXPECT_EQ(*card_addr, CardTable::kCardDirty);
*card_addr = CardTable::kCardClean;
EXPECT_EQ(*card_addr, CardTable::kCardClean);
@@ -96,10 +96,10 @@ TEST_F(CardTableTest, TestMarkCard) {
class UpdateVisitor {
public:
- byte operator()(byte c) const {
+ uint8_t operator()(uint8_t c) const {
return c * 93 + 123;
}
- void operator()(byte* /*card*/, byte /*expected_value*/, byte /*new_value*/) const {
+ void operator()(uint8_t* /*card*/, uint8_t /*expected_value*/, uint8_t /*new_value*/) const {
}
};
@@ -110,32 +110,32 @@ TEST_F(CardTableTest, TestModifyCardsAtomic) {
8U * CardTable::kCardSize);
UpdateVisitor visitor;
size_t start_offset = 0;
- for (byte* cstart = HeapBegin(); cstart < HeapBegin() + delta; cstart += CardTable::kCardSize) {
+ for (uint8_t* cstart = HeapBegin(); cstart < HeapBegin() + delta; cstart += CardTable::kCardSize) {
start_offset = (start_offset + kObjectAlignment) % CardTable::kCardSize;
size_t end_offset = 0;
- for (byte* cend = HeapLimit() - delta; cend < HeapLimit(); cend += CardTable::kCardSize) {
+ for (uint8_t* cend = HeapLimit() - delta; cend < HeapLimit(); cend += CardTable::kCardSize) {
// Don't always start at a card boundary.
- byte* start = cstart + start_offset;
- byte* end = cend - end_offset;
+ uint8_t* start = cstart + start_offset;
+ uint8_t* end = cend - end_offset;
end_offset = (end_offset + kObjectAlignment) % CardTable::kCardSize;
// Modify cards.
card_table_->ModifyCardsAtomic(start, end, visitor, visitor);
// Check adjacent cards not modified.
- for (byte* cur = start - CardTable::kCardSize; cur >= HeapBegin();
+ for (uint8_t* cur = start - CardTable::kCardSize; cur >= HeapBegin();
cur -= CardTable::kCardSize) {
EXPECT_EQ(card_table_->GetCard(reinterpret_cast<mirror::Object*>(cur)),
PseudoRandomCard(cur));
}
- for (byte* cur = end + CardTable::kCardSize; cur < HeapLimit();
+ for (uint8_t* cur = end + CardTable::kCardSize; cur < HeapLimit();
cur += CardTable::kCardSize) {
EXPECT_EQ(card_table_->GetCard(reinterpret_cast<mirror::Object*>(cur)),
PseudoRandomCard(cur));
}
// Verify Range.
- for (byte* cur = start; cur < AlignUp(end, CardTable::kCardSize);
+ for (uint8_t* cur = start; cur < AlignUp(end, CardTable::kCardSize);
cur += CardTable::kCardSize) {
- byte* card = card_table_->CardFromAddr(cur);
- byte value = PseudoRandomCard(cur);
+ uint8_t* card = card_table_->CardFromAddr(cur);
+ uint8_t value = PseudoRandomCard(cur);
EXPECT_EQ(visitor(value), *card);
// Restore for next iteration.
*card = value;
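
The UpdateVisitor pair above hints at the contract behind ModifyCardsAtomic: one functor maps an old card value to a new one, the other is told about each completed update. A hedged sketch of such a per-card compare-and-swap loop, using std::atomic rather than ART's own Atomic<> (names are assumptions):

#include <atomic>
#include <cstdint>

// Hedged sketch, not the ART implementation: 'visitor' maps the old card value to a
// new one, 'modified' is informed of (card, expected, new) once the update sticks.
template <typename Visitor, typename ModifiedVisitor>
void ModifyCardAtomicSketch(std::atomic<uint8_t>* card, const Visitor& visitor,
                            const ModifiedVisitor& modified) {
  uint8_t expected = card->load(std::memory_order_relaxed);
  uint8_t desired;
  do {
    desired = visitor(expected);  // e.g. UpdateVisitor above: c * 93 + 123
  } while (!card->compare_exchange_weak(expected, desired, std::memory_order_relaxed));
  modified(card, expected, desired);
}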
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 3acf80d8cf..753b42deb5 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -45,7 +45,7 @@ class ModUnionClearCardSetVisitor {
: cleared_cards_(cleared_cards) {
}
- inline void operator()(byte* card, byte expected_value, byte new_value) const {
+ inline void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value) const {
if (expected_value == CardTable::kCardDirty) {
cleared_cards_->insert(card);
}
@@ -57,17 +57,17 @@ class ModUnionClearCardSetVisitor {
class ModUnionClearCardVisitor {
public:
- explicit ModUnionClearCardVisitor(std::vector<byte*>* cleared_cards)
+ explicit ModUnionClearCardVisitor(std::vector<uint8_t*>* cleared_cards)
: cleared_cards_(cleared_cards) {
}
- void operator()(byte* card, byte expected_card, byte new_card) const {
+ void operator()(uint8_t* card, uint8_t expected_card, uint8_t new_card) const {
if (expected_card == CardTable::kCardDirty) {
cleared_cards_->push_back(card);
}
}
private:
- std::vector<byte*>* const cleared_cards_;
+ std::vector<uint8_t*>* const cleared_cards_;
};
class ModUnionUpdateObjectReferencesVisitor {
@@ -242,7 +242,7 @@ void ModUnionTableReferenceCache::Verify() {
CardTable* card_table = heap_->GetCardTable();
ContinuousSpaceBitmap* live_bitmap = space_->GetLiveBitmap();
for (const auto& ref_pair : references_) {
- const byte* card = ref_pair.first;
+ const uint8_t* card = ref_pair.first;
if (*card == CardTable::kCardClean) {
std::set<const Object*> reference_set;
for (mirror::HeapReference<Object>* obj_ptr : ref_pair.second) {
@@ -258,14 +258,14 @@ void ModUnionTableReferenceCache::Verify() {
void ModUnionTableReferenceCache::Dump(std::ostream& os) {
CardTable* card_table = heap_->GetCardTable();
os << "ModUnionTable cleared cards: [";
- for (byte* card_addr : cleared_cards_) {
+ for (uint8_t* card_addr : cleared_cards_) {
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
uintptr_t end = start + CardTable::kCardSize;
os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << ",";
}
os << "]\nModUnionTable references: [";
for (const auto& ref_pair : references_) {
- const byte* card_addr = ref_pair.first;
+ const uint8_t* card_addr = ref_pair.first;
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
uintptr_t end = start + CardTable::kCardSize;
os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << "->{";
@@ -349,7 +349,7 @@ void ModUnionTableCardCache::UpdateAndMarkReferences(MarkHeapReferenceCallback*
void ModUnionTableCardCache::Dump(std::ostream& os) {
CardTable* card_table = heap_->GetCardTable();
os << "ModUnionTable dirty cards: [";
- for (const byte* card_addr : cleared_cards_) {
+ for (const uint8_t* card_addr : cleared_cards_) {
auto start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
auto end = start + CardTable::kCardSize;
os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << "\n";
@@ -359,7 +359,7 @@ void ModUnionTableCardCache::Dump(std::ostream& os) {
void ModUnionTableCardCache::SetCards() {
CardTable* card_table = heap_->GetCardTable();
- for (byte* addr = space_->Begin(); addr < AlignUp(space_->End(), CardTable::kCardSize);
+ for (uint8_t* addr = space_->Begin(); addr < AlignUp(space_->End(), CardTable::kCardSize);
addr += CardTable::kCardSize) {
cleared_cards_.insert(card_table->CardFromAddr(addr));
}
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index d0e11e0222..d6342cf057 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -50,8 +50,8 @@ class HeapBitmap;
// cleared between GC phases, reducing the number of dirty cards that need to be scanned.
class ModUnionTable {
public:
- typedef std::set<byte*, std::less<byte*>,
- TrackingAllocator<byte*, kAllocatorTagModUnionCardSet>> CardSet;
+ typedef std::set<uint8_t*, std::less<uint8_t*>,
+ TrackingAllocator<uint8_t*, kAllocatorTagModUnionCardSet>> CardSet;
explicit ModUnionTable(const std::string& name, Heap* heap, space::ContinuousSpace* space)
: name_(name),
@@ -131,7 +131,7 @@ class ModUnionTableReferenceCache : public ModUnionTable {
ModUnionTable::CardSet cleared_cards_;
// Maps from dirty cards to their corresponding alloc space references.
- AllocationTrackingSafeMap<const byte*, std::vector<mirror::HeapReference<mirror::Object>*>,
+ AllocationTrackingSafeMap<const uint8_t*, std::vector<mirror::HeapReference<mirror::Object>*>,
kAllocatorTagModUnionReferenceArray> references_;
};
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index 3ff5874854..d43dc0a39a 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -42,7 +42,7 @@ class RememberedSetCardVisitor {
explicit RememberedSetCardVisitor(RememberedSet::CardSet* const dirty_cards)
: dirty_cards_(dirty_cards) {}
- void operator()(byte* card, byte expected_value, byte new_value) const {
+ void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value) const {
if (expected_value == CardTable::kCardDirty) {
dirty_cards_->insert(card);
}
@@ -129,7 +129,7 @@ void RememberedSet::UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
&contains_reference_to_target_space, arg);
ContinuousSpaceBitmap* bitmap = space_->GetLiveBitmap();
CardSet remove_card_set;
- for (byte* const card_addr : dirty_cards_) {
+ for (uint8_t* const card_addr : dirty_cards_) {
contains_reference_to_target_space = false;
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
DCHECK(space_->HasAddress(reinterpret_cast<mirror::Object*>(start)));
@@ -145,7 +145,7 @@ void RememberedSet::UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
// Remove the cards that didn't contain a reference to the target
// space from the dirty card set.
- for (byte* const card_addr : remove_card_set) {
+ for (uint8_t* const card_addr : remove_card_set) {
DCHECK(dirty_cards_.find(card_addr) != dirty_cards_.end());
dirty_cards_.erase(card_addr);
}
@@ -154,7 +154,7 @@ void RememberedSet::UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
void RememberedSet::Dump(std::ostream& os) {
CardTable* card_table = heap_->GetCardTable();
os << "RememberedSet dirty cards: [";
- for (const byte* card_addr : dirty_cards_) {
+ for (const uint8_t* card_addr : dirty_cards_) {
auto start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
auto end = start + CardTable::kCardSize;
os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << "\n";
@@ -164,8 +164,8 @@ void RememberedSet::Dump(std::ostream& os) {
void RememberedSet::AssertAllDirtyCardsAreWithinSpace() const {
CardTable* card_table = heap_->GetCardTable();
- for (const byte* card_addr : dirty_cards_) {
- auto start = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
+ for (const uint8_t* card_addr : dirty_cards_) {
+ auto start = reinterpret_cast<uint8_t*>(card_table->AddrFromCard(card_addr));
auto end = start + CardTable::kCardSize;
DCHECK_LE(space_->Begin(), start);
DCHECK_LE(end, space_->Limit());
diff --git a/runtime/gc/accounting/remembered_set.h b/runtime/gc/accounting/remembered_set.h
index 8d66e0e9f6..c51e26db07 100644
--- a/runtime/gc/accounting/remembered_set.h
+++ b/runtime/gc/accounting/remembered_set.h
@@ -43,8 +43,8 @@ namespace accounting {
// from the free list spaces to the bump pointer spaces.
class RememberedSet {
public:
- typedef std::set<byte*, std::less<byte*>,
- TrackingAllocator<byte*, kAllocatorTagRememberedSet>> CardSet;
+ typedef std::set<uint8_t*, std::less<uint8_t*>,
+ TrackingAllocator<uint8_t*, kAllocatorTagRememberedSet>> CardSet;
explicit RememberedSet(const std::string& name, Heap* heap, space::ContinuousSpace* space)
: name_(name), heap_(heap), space_(space) {}
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index fc4213e8c6..11347a5bfe 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -35,10 +35,10 @@ inline bool SpaceBitmap<kAlignment>::AtomicTestAndSet(const mirror::Object* obj)
DCHECK_GE(addr, heap_begin_);
const uintptr_t offset = addr - heap_begin_;
const size_t index = OffsetToIndex(offset);
- const uword mask = OffsetToMask(offset);
- Atomic<uword>* atomic_entry = reinterpret_cast<Atomic<uword>*>(&bitmap_begin_[index]);
- DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_;
- uword old_word;
+ const uintptr_t mask = OffsetToMask(offset);
+ Atomic<uintptr_t>* atomic_entry = reinterpret_cast<Atomic<uintptr_t>*>(&bitmap_begin_[index]);
+ DCHECK_LT(index, bitmap_size_ / sizeof(intptr_t)) << " bitmap_size_ = " << bitmap_size_;
+ uintptr_t old_word;
do {
old_word = atomic_entry->LoadRelaxed();
// Fast path: The bit is already set.
@@ -82,8 +82,8 @@ inline void SpaceBitmap<kAlignment>::VisitMarkedRange(uintptr_t visit_begin, uin
const uintptr_t index_start = OffsetToIndex(offset_start);
const uintptr_t index_end = OffsetToIndex(offset_end);
- const size_t bit_start = (offset_start / kAlignment) % kBitsPerWord;
- const size_t bit_end = (offset_end / kAlignment) % kBitsPerWord;
+ const size_t bit_start = (offset_start / kAlignment) % kBitsPerIntPtrT;
+ const size_t bit_end = (offset_end / kAlignment) % kBitsPerIntPtrT;
// Index(begin) ... Index(end)
// [xxxxx???][........][????yyyy]
@@ -93,12 +93,12 @@ inline void SpaceBitmap<kAlignment>::VisitMarkedRange(uintptr_t visit_begin, uin
//
// Left edge.
- uword left_edge = bitmap_begin_[index_start];
+ uintptr_t left_edge = bitmap_begin_[index_start];
// Mark of lower bits that are not in range.
- left_edge &= ~((static_cast<uword>(1) << bit_start) - 1);
+ left_edge &= ~((static_cast<uintptr_t>(1) << bit_start) - 1);
// Right edge. Either unique, or left_edge.
- uword right_edge;
+ uintptr_t right_edge;
if (index_start < index_end) {
// Left edge != right edge.
@@ -110,20 +110,20 @@ inline void SpaceBitmap<kAlignment>::VisitMarkedRange(uintptr_t visit_begin, uin
const size_t shift = CTZ(left_edge);
mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
visitor(obj);
- left_edge ^= (static_cast<uword>(1)) << shift;
+ left_edge ^= (static_cast<uintptr_t>(1)) << shift;
} while (left_edge != 0);
}
// Traverse the middle, full part.
for (size_t i = index_start + 1; i < index_end; ++i) {
- uword w = bitmap_begin_[i];
+ uintptr_t w = bitmap_begin_[i];
if (w != 0) {
const uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
do {
const size_t shift = CTZ(w);
mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
visitor(obj);
- w ^= (static_cast<uword>(1)) << shift;
+ w ^= (static_cast<uintptr_t>(1)) << shift;
} while (w != 0);
}
}
@@ -142,14 +142,14 @@ inline void SpaceBitmap<kAlignment>::VisitMarkedRange(uintptr_t visit_begin, uin
}
// Right edge handling.
- right_edge &= ((static_cast<uword>(1) << bit_end) - 1);
+ right_edge &= ((static_cast<uintptr_t>(1) << bit_end) - 1);
if (right_edge != 0) {
const uintptr_t ptr_base = IndexToOffset(index_end) + heap_begin_;
do {
const size_t shift = CTZ(right_edge);
mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
visitor(obj);
- right_edge ^= (static_cast<uword>(1)) << shift;
+ right_edge ^= (static_cast<uintptr_t>(1)) << shift;
} while (right_edge != 0);
}
#endif
@@ -161,10 +161,10 @@ inline bool SpaceBitmap<kAlignment>::Modify(const mirror::Object* obj) {
DCHECK_GE(addr, heap_begin_);
const uintptr_t offset = addr - heap_begin_;
const size_t index = OffsetToIndex(offset);
- const uword mask = OffsetToMask(offset);
- DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_;
- uword* address = &bitmap_begin_[index];
- uword old_word = *address;
+ const uintptr_t mask = OffsetToMask(offset);
+ DCHECK_LT(index, bitmap_size_ / sizeof(intptr_t)) << " bitmap_size_ = " << bitmap_size_;
+ uintptr_t* address = &bitmap_begin_[index];
+ uintptr_t old_word = *address;
if (kSetBit) {
*address = old_word | mask;
} else {
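
The AtomicTestAndSet hunk above follows the usual load/compare-exchange pattern; here is a minimal std::atomic rendition of the same idea (illustration only, not ART's Atomic<> API):

#include <atomic>
#include <cstdint>

// Returns true if the bit was already set, otherwise sets it and returns false.
inline bool AtomicTestAndSetBit(std::atomic<uintptr_t>* word, uintptr_t mask) {
  uintptr_t old_word = word->load(std::memory_order_relaxed);
  do {
    if ((old_word & mask) != 0) {
      return true;  // fast path: already marked
    }
    // compare_exchange_weak reloads old_word on failure, so the fast path is rechecked.
  } while (!word->compare_exchange_weak(old_word, old_word | mask,
                                        std::memory_order_relaxed));
  return false;
}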
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 39d1f9e132..feb9565ccf 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -29,21 +29,21 @@ namespace accounting {
template<size_t kAlignment>
size_t SpaceBitmap<kAlignment>::ComputeBitmapSize(uint64_t capacity) {
- const uint64_t kBytesCoveredPerWord = kAlignment * kBitsPerWord;
- return (RoundUp(capacity, kBytesCoveredPerWord) / kBytesCoveredPerWord) * kWordSize;
+ const uint64_t kBytesCoveredPerWord = kAlignment * kBitsPerIntPtrT;
+ return (RoundUp(capacity, kBytesCoveredPerWord) / kBytesCoveredPerWord) * sizeof(intptr_t);
}
template<size_t kAlignment>
SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::CreateFromMemMap(
- const std::string& name, MemMap* mem_map, byte* heap_begin, size_t heap_capacity) {
+ const std::string& name, MemMap* mem_map, uint8_t* heap_begin, size_t heap_capacity) {
CHECK(mem_map != nullptr);
- uword* bitmap_begin = reinterpret_cast<uword*>(mem_map->Begin());
+ uintptr_t* bitmap_begin = reinterpret_cast<uintptr_t*>(mem_map->Begin());
const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
return new SpaceBitmap(name, mem_map, bitmap_begin, bitmap_size, heap_begin);
}
template<size_t kAlignment>
-SpaceBitmap<kAlignment>::SpaceBitmap(const std::string& name, MemMap* mem_map, uword* bitmap_begin,
+SpaceBitmap<kAlignment>::SpaceBitmap(const std::string& name, MemMap* mem_map, uintptr_t* bitmap_begin,
size_t bitmap_size, const void* heap_begin)
: mem_map_(mem_map), bitmap_begin_(bitmap_begin), bitmap_size_(bitmap_size),
heap_begin_(reinterpret_cast<uintptr_t>(heap_begin)),
@@ -57,7 +57,7 @@ SpaceBitmap<kAlignment>::~SpaceBitmap() {}
template<size_t kAlignment>
SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::Create(
- const std::string& name, byte* heap_begin, size_t heap_capacity) {
+ const std::string& name, uint8_t* heap_begin, size_t heap_capacity) {
// Round up since heap_capacity is not necessarily a multiple of kAlignment * kBitsPerWord.
const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
std::string error_msg;
@@ -72,8 +72,8 @@ SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::Create(
template<size_t kAlignment>
void SpaceBitmap<kAlignment>::SetHeapLimit(uintptr_t new_end) {
- DCHECK(IsAligned<kBitsPerWord * kAlignment>(new_end));
- size_t new_size = OffsetToIndex(new_end - heap_begin_) * kWordSize;
+ DCHECK(IsAligned<kBitsPerIntPtrT * kAlignment>(new_end));
+ size_t new_size = OffsetToIndex(new_end - heap_begin_) * sizeof(intptr_t);
if (new_size < bitmap_size_) {
bitmap_size_ = new_size;
}
@@ -97,7 +97,7 @@ void SpaceBitmap<kAlignment>::Clear() {
template<size_t kAlignment>
void SpaceBitmap<kAlignment>::CopyFrom(SpaceBitmap* source_bitmap) {
DCHECK_EQ(Size(), source_bitmap->Size());
- std::copy(source_bitmap->Begin(), source_bitmap->Begin() + source_bitmap->Size() / kWordSize, Begin());
+ std::copy(source_bitmap->Begin(), source_bitmap->Begin() + source_bitmap->Size() / sizeof(intptr_t), Begin());
}
template<size_t kAlignment>
@@ -106,16 +106,16 @@ void SpaceBitmap<kAlignment>::Walk(ObjectCallback* callback, void* arg) {
CHECK(callback != NULL);
uintptr_t end = OffsetToIndex(HeapLimit() - heap_begin_ - 1);
- uword* bitmap_begin = bitmap_begin_;
+ uintptr_t* bitmap_begin = bitmap_begin_;
for (uintptr_t i = 0; i <= end; ++i) {
- uword w = bitmap_begin[i];
+ uintptr_t w = bitmap_begin[i];
if (w != 0) {
uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
do {
const size_t shift = CTZ(w);
mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
(*callback)(obj, arg);
- w ^= (static_cast<uword>(1)) << shift;
+ w ^= (static_cast<uintptr_t>(1)) << shift;
} while (w != 0);
}
}
@@ -139,7 +139,7 @@ void SpaceBitmap<kAlignment>::SweepWalk(const SpaceBitmap<kAlignment>& live_bitm
}
// TODO: rewrite the callbacks to accept a std::vector<mirror::Object*> rather than a mirror::Object**?
- constexpr size_t buffer_size = kWordSize * kBitsPerWord;
+ constexpr size_t buffer_size = sizeof(intptr_t) * kBitsPerIntPtrT;
#ifdef __LP64__
// Heap-allocate for smaller stack frame.
std::unique_ptr<mirror::Object*[]> pointer_buf_ptr(new mirror::Object*[buffer_size]);
@@ -152,21 +152,21 @@ void SpaceBitmap<kAlignment>::SweepWalk(const SpaceBitmap<kAlignment>& live_bitm
size_t start = OffsetToIndex(sweep_begin - live_bitmap.heap_begin_);
size_t end = OffsetToIndex(sweep_end - live_bitmap.heap_begin_ - 1);
- CHECK_LT(end, live_bitmap.Size() / kWordSize);
- uword* live = live_bitmap.bitmap_begin_;
- uword* mark = mark_bitmap.bitmap_begin_;
+ CHECK_LT(end, live_bitmap.Size() / sizeof(intptr_t));
+ uintptr_t* live = live_bitmap.bitmap_begin_;
+ uintptr_t* mark = mark_bitmap.bitmap_begin_;
for (size_t i = start; i <= end; i++) {
- uword garbage = live[i] & ~mark[i];
+ uintptr_t garbage = live[i] & ~mark[i];
if (UNLIKELY(garbage != 0)) {
uintptr_t ptr_base = IndexToOffset(i) + live_bitmap.heap_begin_;
do {
const size_t shift = CTZ(garbage);
- garbage ^= (static_cast<uword>(1)) << shift;
+ garbage ^= (static_cast<uintptr_t>(1)) << shift;
*pb++ = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
} while (garbage != 0);
// Make sure that there are always enough slots available for an
// entire word of one bits.
- if (pb >= &pointer_buf[buffer_size - kBitsPerWord]) {
+ if (pb >= &pointer_buf[buffer_size - kBitsPerIntPtrT]) {
(*callback)(pb - &pointer_buf[0], &pointer_buf[0], arg);
pb = &pointer_buf[0];
}
@@ -245,21 +245,21 @@ void SpaceBitmap<kAlignment>::WalkFieldsInOrder(SpaceBitmap<kAlignment>* visited
template<size_t kAlignment>
void SpaceBitmap<kAlignment>::InOrderWalk(ObjectCallback* callback, void* arg) {
std::unique_ptr<SpaceBitmap<kAlignment>> visited(
- Create("bitmap for in-order walk", reinterpret_cast<byte*>(heap_begin_),
- IndexToOffset(bitmap_size_ / kWordSize)));
+ Create("bitmap for in-order walk", reinterpret_cast<uint8_t*>(heap_begin_),
+ IndexToOffset(bitmap_size_ / sizeof(intptr_t))));
CHECK(bitmap_begin_ != nullptr);
CHECK(callback != nullptr);
- uintptr_t end = Size() / kWordSize;
+ uintptr_t end = Size() / sizeof(intptr_t);
for (uintptr_t i = 0; i < end; ++i) {
// Need uint for unsigned shift.
- uword w = bitmap_begin_[i];
+ uintptr_t w = bitmap_begin_[i];
if (UNLIKELY(w != 0)) {
uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
while (w != 0) {
const size_t shift = CTZ(w);
mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
WalkFieldsInOrder(visited.get(), callback, obj, arg);
- w ^= (static_cast<uword>(1)) << shift;
+ w ^= (static_cast<uintptr_t>(1)) << shift;
}
}
}
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index f72b30f81b..e73166b091 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -45,13 +45,13 @@ class SpaceBitmap {
// Initialize a space bitmap so that it points to a bitmap large enough to cover a heap at
// heap_begin of heap_capacity bytes, where objects are guaranteed to be kAlignment-aligned.
- static SpaceBitmap* Create(const std::string& name, byte* heap_begin, size_t heap_capacity);
+ static SpaceBitmap* Create(const std::string& name, uint8_t* heap_begin, size_t heap_capacity);
// Initialize a space bitmap using the provided mem_map as the live bits. Takes ownership of the
// mem map. The address range covered starts at heap_begin and is of size equal to heap_capacity.
// Objects are kAlignment-aligned.
static SpaceBitmap* CreateFromMemMap(const std::string& name, MemMap* mem_map,
- byte* heap_begin, size_t heap_capacity);
+ uint8_t* heap_begin, size_t heap_capacity);
~SpaceBitmap();
@@ -59,17 +59,17 @@ class SpaceBitmap {
// <index> is the index of .bits that contains the bit representing
// <offset>.
static constexpr size_t OffsetToIndex(size_t offset) {
- return offset / kAlignment / kBitsPerWord;
+ return offset / kAlignment / kBitsPerIntPtrT;
}
template<typename T>
static constexpr T IndexToOffset(T index) {
- return static_cast<T>(index * kAlignment * kBitsPerWord);
+ return static_cast<T>(index * kAlignment * kBitsPerIntPtrT);
}
// Bits are packed in the obvious way.
- static constexpr uword OffsetToMask(uintptr_t offset) {
- return (static_cast<size_t>(1)) << ((offset / kAlignment) % kBitsPerWord);
+ static constexpr uintptr_t OffsetToMask(uintptr_t offset) {
+ return (static_cast<size_t>(1)) << ((offset / kAlignment) % kBitsPerIntPtrT);
}
bool Set(const mirror::Object* obj) ALWAYS_INLINE {
@@ -95,7 +95,7 @@ class SpaceBitmap {
// bitmap.
const uintptr_t offset = reinterpret_cast<uintptr_t>(obj) - heap_begin_;
const size_t index = OffsetToIndex(offset);
- return index < bitmap_size_ / kWordSize;
+ return index < bitmap_size_ / sizeof(intptr_t);
}
void VisitRange(uintptr_t base, uintptr_t max, ObjectCallback* callback, void* arg) const;
@@ -146,7 +146,7 @@ class SpaceBitmap {
void CopyFrom(SpaceBitmap* source_bitmap);
// Starting address of our internal storage.
- uword* Begin() {
+ uintptr_t* Begin() {
return bitmap_begin_;
}
@@ -157,7 +157,7 @@ class SpaceBitmap {
// Size in bytes of the memory that the bitmaps spans.
uint64_t HeapSize() const {
- return IndexToOffset<uint64_t>(Size() / kWordSize);
+ return IndexToOffset<uint64_t>(Size() / sizeof(intptr_t));
}
uintptr_t HeapBegin() const {
@@ -192,7 +192,7 @@ class SpaceBitmap {
private:
// TODO: heap_end_ is initialized so that the heap bitmap is empty, this doesn't require the -1,
// however, we document that this is expected on heap_end_
- SpaceBitmap(const std::string& name, MemMap* mem_map, uword* bitmap_begin, size_t bitmap_size,
+ SpaceBitmap(const std::string& name, MemMap* mem_map, uintptr_t* bitmap_begin, size_t bitmap_size,
const void* heap_begin);
// Helper function for computing bitmap size based on a 64 bit capacity.
@@ -214,7 +214,7 @@ class SpaceBitmap {
std::unique_ptr<MemMap> mem_map_;
// This bitmap itself, word sized for efficiency in scanning.
- uword* const bitmap_begin_;
+ uintptr_t* const bitmap_begin_;
// Size of this bitmap.
size_t bitmap_size_;
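
For reference, the word/bit arithmetic behind OffsetToIndex and OffsetToMask above, restated as a tiny stand-alone sketch; kAlignment here is an assumed constant:

#include <cstddef>
#include <cstdint>

// Stand-alone restatement of the mapping used above; kAlignment is an assumed value.
constexpr size_t kAlignment = 8;
constexpr size_t kBitsPerIntPtrT = sizeof(uintptr_t) * 8;

constexpr size_t OffsetToIndex(size_t offset) {
  return offset / kAlignment / kBitsPerIntPtrT;            // which bitmap word
}

constexpr uintptr_t OffsetToMask(uintptr_t offset) {
  return static_cast<uintptr_t>(1) << ((offset / kAlignment) % kBitsPerIntPtrT);  // which bit
}

inline void SetBit(uintptr_t* bitmap, uintptr_t heap_begin, uintptr_t addr) {
  const uintptr_t offset = addr - heap_begin;
  bitmap[OffsetToIndex(offset)] |= OffsetToMask(offset);    // mark the object at addr
}

Each bitmap word thus covers kAlignment * kBitsPerIntPtrT heap bytes, which is the kBytesCoveredPerWord quantity used by ComputeBitmapSize earlier in this diff.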
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index a30bb253e3..40856fc9ef 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -30,7 +30,7 @@ namespace accounting {
class SpaceBitmapTest : public CommonRuntimeTest {};
TEST_F(SpaceBitmapTest, Init) {
- byte* heap_begin = reinterpret_cast<byte*>(0x10000000);
+ uint8_t* heap_begin = reinterpret_cast<uint8_t*>(0x10000000);
size_t heap_capacity = 16 * MB;
std::unique_ptr<ContinuousSpaceBitmap> space_bitmap(
ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
@@ -51,21 +51,21 @@ class BitmapVerify {
EXPECT_EQ(bitmap_->Test(obj), ((reinterpret_cast<uintptr_t>(obj) & 0xF) != 0));
}
- ContinuousSpaceBitmap* bitmap_;
+ ContinuousSpaceBitmap* const bitmap_;
const mirror::Object* begin_;
const mirror::Object* end_;
};
TEST_F(SpaceBitmapTest, ScanRange) {
- byte* heap_begin = reinterpret_cast<byte*>(0x10000000);
+ uint8_t* heap_begin = reinterpret_cast<uint8_t*>(0x10000000);
size_t heap_capacity = 16 * MB;
std::unique_ptr<ContinuousSpaceBitmap> space_bitmap(
ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
EXPECT_TRUE(space_bitmap.get() != NULL);
- // Set all the odd bits in the first BitsPerWord * 3 to one.
- for (size_t j = 0; j < kBitsPerWord * 3; ++j) {
+ // Set all the odd bits in the first BitsPerIntPtrT * 3 to one.
+ for (size_t j = 0; j < kBitsPerIntPtrT * 3; ++j) {
const mirror::Object* obj =
reinterpret_cast<mirror::Object*>(heap_begin + j * kObjectAlignment);
if (reinterpret_cast<uintptr_t>(obj) & 0xF) {
@@ -76,10 +76,10 @@ TEST_F(SpaceBitmapTest, ScanRange) {
// possible length up to a maximum of kBitsPerWord * 2 - 1 bits.
// This handles all the cases, having runs which start and end on the same word, and different
// words.
- for (size_t i = 0; i < static_cast<size_t>(kBitsPerWord); ++i) {
+ for (size_t i = 0; i < static_cast<size_t>(kBitsPerIntPtrT); ++i) {
mirror::Object* start =
reinterpret_cast<mirror::Object*>(heap_begin + i * kObjectAlignment);
- for (size_t j = 0; j < static_cast<size_t>(kBitsPerWord * 2); ++j) {
+ for (size_t j = 0; j < static_cast<size_t>(kBitsPerIntPtrT * 2); ++j) {
mirror::Object* end =
reinterpret_cast<mirror::Object*>(heap_begin + (i + j) * kObjectAlignment);
BitmapVerify(space_bitmap.get(), start, end);
@@ -95,7 +95,7 @@ class SimpleCounter {
(*count_)++;
}
- size_t* count_;
+ size_t* const count_;
};
class RandGen {
@@ -112,7 +112,7 @@ class RandGen {
template <size_t kAlignment>
void RunTest() NO_THREAD_SAFETY_ANALYSIS {
- byte* heap_begin = reinterpret_cast<byte*>(0x10000000);
+ uint8_t* heap_begin = reinterpret_cast<uint8_t*>(0x10000000);
size_t heap_capacity = 16 * MB;
// Seed with 0x1234 for reproducibility.
diff --git a/runtime/gc/allocator/rosalloc-inl.h b/runtime/gc/allocator/rosalloc-inl.h
index c69ca48de8..dd419a4a9c 100644
--- a/runtime/gc/allocator/rosalloc-inl.h
+++ b/runtime/gc/allocator/rosalloc-inl.h
@@ -36,7 +36,7 @@ inline ALWAYS_INLINE void* RosAlloc::Alloc(Thread* self, size_t size, size_t* by
}
// Check if the returned memory is really all zero.
if (kCheckZeroMemory && m != nullptr) {
- byte* bytes = reinterpret_cast<byte*>(m);
+ uint8_t* bytes = reinterpret_cast<uint8_t*>(m);
for (size_t i = 0; i < size; ++i) {
DCHECK_EQ(bytes[i], 0);
}
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index a7e5e7487b..0cea89dc17 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -49,7 +49,7 @@ RosAlloc::Run* RosAlloc::dedicated_full_run_ =
RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
PageReleaseMode page_release_mode, size_t page_release_size_threshold)
- : base_(reinterpret_cast<byte*>(base)), footprint_(capacity),
+ : base_(reinterpret_cast<uint8_t*>(base)), footprint_(capacity),
capacity_(capacity), max_capacity_(max_capacity),
lock_("rosalloc global lock", kRosAllocGlobalLock),
bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
@@ -107,7 +107,7 @@ RosAlloc::~RosAlloc() {
}
}
-void* RosAlloc::AllocPages(Thread* self, size_t num_pages, byte page_map_type) {
+void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type) {
lock_.AssertHeld(self);
DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
FreePageRun* res = NULL;
@@ -128,7 +128,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, byte page_map_type) {
}
if (req_byte_size < fpr_byte_size) {
// Split.
- FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
+ FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
if (kIsDebugBuild) {
remainder->magic_num_ = kMagicNumFree;
}
@@ -226,7 +226,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, byte page_map_type) {
}
if (req_byte_size < fpr_byte_size) {
// Split if there's a remainder.
- FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
+ FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
if (kIsDebugBuild) {
remainder->magic_num_ = kMagicNumFree;
}
@@ -290,9 +290,9 @@ size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) {
lock_.AssertHeld(self);
size_t pm_idx = ToPageMapIndex(ptr);
DCHECK_LT(pm_idx, page_map_size_);
- byte pm_type = page_map_[pm_idx];
+ uint8_t pm_type = page_map_[pm_idx];
DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
- byte pm_part_type;
+ uint8_t pm_part_type;
switch (pm_type) {
case kPageMapRun:
pm_part_type = kPageMapRunPart;
@@ -319,8 +319,8 @@ size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) {
const size_t byte_size = num_pages * kPageSize;
if (already_zero) {
if (kCheckZeroMemory) {
- const uword* word_ptr = reinterpret_cast<uword*>(ptr);
- for (size_t i = 0; i < byte_size / sizeof(uword); ++i) {
+ const uintptr_t* word_ptr = reinterpret_cast<uintptr_t*>(ptr);
+ for (size_t i = 0; i < byte_size / sizeof(uintptr_t); ++i) {
CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i;
}
}
@@ -473,9 +473,9 @@ void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_alloca
}
// Check if the returned memory is really all zero.
if (kCheckZeroMemory) {
- CHECK_EQ(total_bytes % sizeof(uword), 0U);
- const uword* words = reinterpret_cast<uword*>(r);
- for (size_t i = 0; i < total_bytes / sizeof(uword); ++i) {
+ CHECK_EQ(total_bytes % sizeof(uintptr_t), 0U);
+ const uintptr_t* words = reinterpret_cast<uintptr_t*>(r);
+ for (size_t i = 0; i < total_bytes / sizeof(uintptr_t); ++i) {
CHECK_EQ(words[i], 0U);
}
}
@@ -490,7 +490,7 @@ size_t RosAlloc::FreeInternal(Thread* self, void* ptr) {
{
MutexLock mu(self, lock_);
DCHECK_LT(pm_idx, page_map_size_);
- byte page_map_entry = page_map_[pm_idx];
+ uint8_t page_map_entry = page_map_[pm_idx];
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec << pm_idx
<< ", page_map_entry=" << static_cast<int>(page_map_entry);
@@ -507,13 +507,12 @@ size_t RosAlloc::FreeInternal(Thread* self, void* ptr) {
--pm_idx;
DCHECK_LT(pm_idx, capacity_ / kPageSize);
} while (page_map_[pm_idx] != kPageMapRun);
- // Fall-through.
+ FALLTHROUGH_INTENDED;
case kPageMapRun:
run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
DCHECK_EQ(run->magic_num_, kMagicNum);
break;
case kPageMapReleased:
- // Fall-through.
case kPageMapEmpty:
LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
return 0;
@@ -557,7 +556,7 @@ RosAlloc::Run* RosAlloc::AllocRun(Thread* self, size_t idx) {
const size_t num_of_slots = numOfSlots[idx];
const size_t bracket_size = bracketSizes[idx];
const size_t num_of_bytes = num_of_slots * bracket_size;
- byte* begin = reinterpret_cast<byte*>(new_run) + headerSizes[idx];
+ uint8_t* begin = reinterpret_cast<uint8_t*>(new_run) + headerSizes[idx];
for (size_t i = 0; i < num_of_bytes; i += kPrefetchStride) {
__builtin_prefetch(begin + i);
}
@@ -869,7 +868,7 @@ inline void* RosAlloc::Run::AllocSlot() {
DCHECK_EQ(*alloc_bitmap_ptr & mask, 0U);
*alloc_bitmap_ptr |= mask;
DCHECK_NE(*alloc_bitmap_ptr & mask, 0U);
- byte* slot_addr = reinterpret_cast<byte*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
+ uint8_t* slot_addr = reinterpret_cast<uint8_t*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
<< ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
@@ -889,10 +888,10 @@ inline void* RosAlloc::Run::AllocSlot() {
void RosAlloc::Run::FreeSlot(void* ptr) {
DCHECK(!IsThreadLocal());
- const byte idx = size_bracket_idx_;
+ const uint8_t idx = size_bracket_idx_;
const size_t bracket_size = bracketSizes[idx];
- const size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
- - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
+ const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
+ - (reinterpret_cast<uint8_t*>(this) + headerSizes[idx]);
DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
size_t slot_idx = offset_from_slot_base / bracket_size;
DCHECK_LT(slot_idx, numOfSlots[idx]);
@@ -1001,9 +1000,9 @@ inline size_t RosAlloc::Run::MarkBulkFreeBitMap(void* ptr) {
inline size_t RosAlloc::Run::MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base,
const char* caller_name) {
- const byte idx = size_bracket_idx_;
- const size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
- - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
+ const uint8_t idx = size_bracket_idx_;
+ const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
+ - (reinterpret_cast<uint8_t*>(this) + headerSizes[idx]);
const size_t bracket_size = bracketSizes[idx];
memset(ptr, 0, bracket_size);
DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
@@ -1037,7 +1036,7 @@ inline uint32_t RosAlloc::Run::GetBitmapLastVectorMask(size_t num_slots, size_t
}
inline bool RosAlloc::Run::IsAllFree() {
- const byte idx = size_bracket_idx_;
+ const uint8_t idx = size_bracket_idx_;
const size_t num_slots = numOfSlots[idx];
const size_t num_vec = NumberOfBitmapVectors();
DCHECK_NE(num_vec, 0U);
@@ -1095,13 +1094,13 @@ inline void RosAlloc::Run::SetAllocBitMapBitsForInvalidSlots() {
}
inline void RosAlloc::Run::ZeroHeader() {
- const byte idx = size_bracket_idx_;
+ const uint8_t idx = size_bracket_idx_;
memset(this, 0, headerSizes[idx]);
}
inline void RosAlloc::Run::ZeroData() {
- const byte idx = size_bracket_idx_;
- byte* slot_begin = reinterpret_cast<byte*>(this) + headerSizes[idx];
+ const uint8_t idx = size_bracket_idx_;
+ uint8_t* slot_begin = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
memset(slot_begin, 0, numOfSlots[idx] * bracketSizes[idx]);
}
@@ -1114,10 +1113,10 @@ inline void RosAlloc::Run::FillAllocBitMap() {
void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
void* arg) {
size_t idx = size_bracket_idx_;
- byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
+ uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
size_t num_slots = numOfSlots[idx];
size_t bracket_size = IndexToBracketSize(idx);
- DCHECK_EQ(slot_base + num_slots * bracket_size, reinterpret_cast<byte*>(this) + numOfPages[idx] * kPageSize);
+ DCHECK_EQ(slot_base + num_slots * bracket_size, reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize);
size_t num_vec = RoundUp(num_slots, 32) / 32;
size_t slots = 0;
for (size_t v = 0; v < num_vec; v++, slots += 32) {
@@ -1126,7 +1125,7 @@ void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size
size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
for (size_t i = 0; i < end; ++i) {
bool is_allocated = ((vec >> i) & 0x1) != 0;
- byte* slot_addr = slot_base + (slots + i) * bracket_size;
+ uint8_t* slot_addr = slot_base + (slots + i) * bracket_size;
if (is_allocated) {
handler(slot_addr, slot_addr + bracket_size, bracket_size, arg);
} else {
@@ -1169,7 +1168,7 @@ size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
Run* run = nullptr;
if (kReadPageMapEntryWithoutLockInBulkFree) {
// Read the page map entries without locking the lock.
- byte page_map_entry = page_map_[pm_idx];
+ uint8_t page_map_entry = page_map_[pm_idx];
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
<< std::dec << pm_idx
@@ -1196,7 +1195,7 @@ size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
// Read the page map entries with a lock.
MutexLock mu(self, lock_);
DCHECK_LT(pm_idx, page_map_size_);
- byte page_map_entry = page_map_[pm_idx];
+ uint8_t page_map_entry = page_map_[pm_idx];
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
<< std::dec << pm_idx
@@ -1354,7 +1353,7 @@ std::string RosAlloc::DumpPageMap() {
size_t remaining_curr_fpr_size = 0;
size_t num_running_empty_pages = 0;
for (size_t i = 0; i < end; ++i) {
- byte pm = page_map_[i];
+ uint8_t pm = page_map_[i];
switch (pm) {
case kPageMapReleased:
// Fall-through.
@@ -1472,8 +1471,8 @@ size_t RosAlloc::UsableSize(void* ptr) {
Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
DCHECK_EQ(run->magic_num_, kMagicNum);
size_t idx = run->size_bracket_idx_;
- size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
- - (reinterpret_cast<byte*>(run) + headerSizes[idx]);
+ size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
+ - (reinterpret_cast<uint8_t*>(run) + headerSizes[idx]);
DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
return IndexToBracketSize(idx);
}
@@ -1503,8 +1502,8 @@ bool RosAlloc::Trim() {
size_t new_num_of_pages = new_footprint / kPageSize;
DCHECK_GE(page_map_size_, new_num_of_pages);
// Zero out the tail of the page map.
- byte* zero_begin = const_cast<byte*>(page_map_) + new_num_of_pages;
- byte* madvise_begin = AlignUp(zero_begin, kPageSize);
+ uint8_t* zero_begin = const_cast<uint8_t*>(page_map_) + new_num_of_pages;
+ uint8_t* madvise_begin = AlignUp(zero_begin, kPageSize);
DCHECK_LE(madvise_begin, page_map_mem_map_->End());
size_t madvise_size = page_map_mem_map_->End() - madvise_begin;
if (madvise_size > 0) {
@@ -1544,7 +1543,7 @@ void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_by
size_t pm_end = page_map_size_;
size_t i = 0;
while (i < pm_end) {
- byte pm = page_map_[i];
+ uint8_t pm = page_map_[i];
switch (pm) {
case kPageMapReleased:
// Fall-through.
@@ -1558,9 +1557,9 @@ void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_by
if (kIsDebugBuild) {
// In the debug build, the first page of a free page run
// contains a magic number for debugging. Exclude it.
- start = reinterpret_cast<byte*>(fpr) + kPageSize;
+ start = reinterpret_cast<uint8_t*>(fpr) + kPageSize;
}
- void* end = reinterpret_cast<byte*>(fpr) + fpr_size;
+ void* end = reinterpret_cast<uint8_t*>(fpr) + fpr_size;
handler(start, end, 0, arg);
size_t num_pages = fpr_size / kPageSize;
if (kIsDebugBuild) {
@@ -1879,7 +1878,7 @@ void RosAlloc::Verify() {
size_t pm_end = page_map_size_;
size_t i = 0;
while (i < pm_end) {
- byte pm = page_map_[i];
+ uint8_t pm = page_map_[i];
switch (pm) {
case kPageMapReleased:
// Fall-through.
@@ -1994,13 +1993,13 @@ void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
DCHECK_EQ(magic_num_, kMagicNum) << "Bad magic number : " << Dump();
const size_t idx = size_bracket_idx_;
CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
- byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
+ uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
const size_t num_slots = numOfSlots[idx];
const size_t num_vec = RoundUp(num_slots, 32) / 32;
CHECK_GT(num_vec, 0U);
size_t bracket_size = IndexToBracketSize(idx);
CHECK_EQ(slot_base + num_slots * bracket_size,
- reinterpret_cast<byte*>(this) + numOfPages[idx] * kPageSize)
+ reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize)
<< "Mismatch in the end address of the run " << Dump();
// Check that the bulk free bitmap is clean. It's only used during BulkFree().
CHECK(IsBulkFreeBitmapClean()) << "The bulk free bit map isn't clean " << Dump();
@@ -2084,7 +2083,7 @@ void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
// thread local free bitmap.
bool is_thread_local_freed = IsThreadLocal() && ((thread_local_free_vec >> i) & 0x1) != 0;
if (is_allocated && !is_thread_local_freed) {
- byte* slot_addr = slot_base + (slots + i) * bracket_size;
+ uint8_t* slot_addr = slot_base + (slots + i) * bracket_size;
mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
size_t obj_size = obj->SizeOf();
CHECK_LE(obj_size, kLargeSizeThreshold)
@@ -2108,7 +2107,7 @@ size_t RosAlloc::ReleasePages() {
while (i < page_map_size_) {
// Reading the page map without a lock is racy but the race is benign since it should only
// result in occasionally not releasing pages which we could release.
- byte pm = page_map_[i];
+ uint8_t pm = page_map_[i];
switch (pm) {
case kPageMapReleased:
// Fall through.
@@ -2129,7 +2128,7 @@ size_t RosAlloc::ReleasePages() {
if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
size_t fpr_size = fpr->ByteSize(this);
DCHECK(IsAligned<kPageSize>(fpr_size));
- byte* start = reinterpret_cast<byte*>(fpr);
+ uint8_t* start = reinterpret_cast<uint8_t*>(fpr);
reclaimed_bytes += ReleasePageRange(start, start + fpr_size);
size_t pages = fpr_size / kPageSize;
CHECK_GT(pages, 0U) << "Infinite loop probable";
@@ -2138,7 +2137,7 @@ size_t RosAlloc::ReleasePages() {
break;
}
}
- // Fall through.
+ FALLTHROUGH_INTENDED;
}
case kPageMapLargeObject: // Fall through.
case kPageMapLargeObjectPart: // Fall through.
@@ -2154,7 +2153,7 @@ size_t RosAlloc::ReleasePages() {
return reclaimed_bytes;
}
-size_t RosAlloc::ReleasePageRange(byte* start, byte* end) {
+size_t RosAlloc::ReleasePageRange(uint8_t* start, uint8_t* end) {
DCHECK_ALIGNED(start, kPageSize);
DCHECK_ALIGNED(end, kPageSize);
DCHECK_LT(start, end);
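
Many of the rosalloc.cc hunks above repeat the same slot addressing; a hedged stand-alone sketch of that arithmetic, where header_size and bracket_size stand in for headerSizes[idx] and bracketSizes[idx]:

#include <cstddef>
#include <cstdint>

// Slots of one size bracket are laid out contiguously right after the run header.
inline uint8_t* SlotAddress(uint8_t* run_base, size_t header_size,
                            size_t bracket_size, size_t slot_idx) {
  return run_base + header_size + slot_idx * bracket_size;
}

inline size_t SlotIndex(uint8_t* run_base, size_t header_size,
                        size_t bracket_size, const void* ptr) {
  const size_t offset = reinterpret_cast<const uint8_t*>(ptr) - (run_base + header_size);
  return offset / bracket_size;  // the DCHECKs above assert offset % bracket_size == 0
}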
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 2fbd97a8d5..8374ff70b2 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -44,13 +44,13 @@ class RosAlloc {
// Represents a run of free pages.
class FreePageRun {
public:
- byte magic_num_; // The magic number used for debugging only.
+ uint8_t magic_num_; // The magic number used for debugging only.
bool IsFree() const {
return !kIsDebugBuild || magic_num_ == kMagicNumFree;
}
size_t ByteSize(RosAlloc* rosalloc) const EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
- const byte* fpr_base = reinterpret_cast<const byte*>(this);
+ const uint8_t* fpr_base = reinterpret_cast<const uint8_t*>(this);
size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
size_t byte_size = rosalloc->free_page_run_size_map_[pm_idx];
DCHECK_GE(byte_size, static_cast<size_t>(0));
@@ -60,7 +60,7 @@ class RosAlloc {
void SetByteSize(RosAlloc* rosalloc, size_t byte_size)
EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
- byte* fpr_base = reinterpret_cast<byte*>(this);
+ uint8_t* fpr_base = reinterpret_cast<uint8_t*>(this);
size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
rosalloc->free_page_run_size_map_[pm_idx] = byte_size;
}
@@ -68,8 +68,8 @@ class RosAlloc {
return reinterpret_cast<void*>(this);
}
void* End(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
- byte* fpr_base = reinterpret_cast<byte*>(this);
- byte* end = fpr_base + ByteSize(rosalloc);
+ uint8_t* fpr_base = reinterpret_cast<uint8_t*>(this);
+ uint8_t* end = fpr_base + ByteSize(rosalloc);
return end;
}
bool IsLargerThanPageReleaseThreshold(RosAlloc* rosalloc)
@@ -78,7 +78,7 @@ class RosAlloc {
}
bool IsAtEndOfSpace(RosAlloc* rosalloc)
EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
- return reinterpret_cast<byte*>(this) + ByteSize(rosalloc) == rosalloc->base_ + rosalloc->footprint_;
+ return reinterpret_cast<uint8_t*>(this) + ByteSize(rosalloc) == rosalloc->base_ + rosalloc->footprint_;
}
bool ShouldReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
switch (rosalloc->page_release_mode_) {
@@ -98,7 +98,7 @@ class RosAlloc {
}
}
void ReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
- byte* start = reinterpret_cast<byte*>(this);
+ uint8_t* start = reinterpret_cast<uint8_t*>(this);
size_t byte_size = ByteSize(rosalloc);
DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
if (ShouldReleasePages(rosalloc)) {
@@ -151,10 +151,10 @@ class RosAlloc {
//
class Run {
public:
- byte magic_num_; // The magic number used for debugging.
- byte size_bracket_idx_; // The index of the size bracket of this run.
- byte is_thread_local_; // True if this run is used as a thread-local run.
- byte to_be_bulk_freed_; // Used within BulkFree() to flag a run that's involved with a bulk free.
+ uint8_t magic_num_; // The magic number used for debugging.
+ uint8_t size_bracket_idx_; // The index of the size bracket of this run.
+ uint8_t is_thread_local_; // True if this run is used as a thread-local run.
+ uint8_t to_be_bulk_freed_; // Used within BulkFree() to flag a run that's involved with a bulk free.
uint32_t first_search_vec_idx_; // The index of the first bitmap vector which may contain an available slot.
uint32_t alloc_bit_map_[0]; // The bit map that allocates if each slot is in use.
@@ -175,20 +175,20 @@ class RosAlloc {
// Returns the byte size of the header except for the bit maps.
static size_t fixed_header_size() {
Run temp;
- size_t size = reinterpret_cast<byte*>(&temp.alloc_bit_map_) - reinterpret_cast<byte*>(&temp);
+ size_t size = reinterpret_cast<uint8_t*>(&temp.alloc_bit_map_) - reinterpret_cast<uint8_t*>(&temp);
DCHECK_EQ(size, static_cast<size_t>(8));
return size;
}
// Returns the base address of the free bit map.
uint32_t* BulkFreeBitMap() {
- return reinterpret_cast<uint32_t*>(reinterpret_cast<byte*>(this) + bulkFreeBitMapOffsets[size_bracket_idx_]);
+ return reinterpret_cast<uint32_t*>(reinterpret_cast<uint8_t*>(this) + bulkFreeBitMapOffsets[size_bracket_idx_]);
}
// Returns the base address of the thread local free bit map.
uint32_t* ThreadLocalFreeBitMap() {
- return reinterpret_cast<uint32_t*>(reinterpret_cast<byte*>(this) + threadLocalFreeBitMapOffsets[size_bracket_idx_]);
+ return reinterpret_cast<uint32_t*>(reinterpret_cast<uint8_t*>(this) + threadLocalFreeBitMapOffsets[size_bracket_idx_]);
}
void* End() {
- return reinterpret_cast<byte*>(this) + kPageSize * numOfPages[size_bracket_idx_];
+ return reinterpret_cast<uint8_t*>(this) + kPageSize * numOfPages[size_bracket_idx_];
}
// Returns the number of bitmap words per run.
size_t NumberOfBitmapVectors() const {
@@ -259,13 +259,13 @@ class RosAlloc {
};
// The magic number for a run.
- static const byte kMagicNum = 42;
+ static constexpr uint8_t kMagicNum = 42;
// The magic number for free pages.
- static const byte kMagicNumFree = 43;
+ static constexpr uint8_t kMagicNumFree = 43;
// The number of size brackets. Sync this with the length of Thread::rosalloc_runs_.
- static const size_t kNumOfSizeBrackets = kNumRosAllocThreadLocalSizeBrackets;
+ static constexpr size_t kNumOfSizeBrackets = kNumRosAllocThreadLocalSizeBrackets;
// The number of smaller size brackets that are 16 bytes apart.
- static const size_t kNumOfQuantumSizeBrackets = 32;
+ static constexpr size_t kNumOfQuantumSizeBrackets = 32;
// The sizes (the slot sizes, in bytes) of the size brackets.
static size_t bracketSizes[kNumOfSizeBrackets];
// The numbers of pages that are used for runs for each size bracket.
@@ -356,13 +356,13 @@ class RosAlloc {
// address is page size aligned.
size_t ToPageMapIndex(const void* addr) const {
DCHECK(base_ <= addr && addr < base_ + capacity_);
- size_t byte_offset = reinterpret_cast<const byte*>(addr) - base_;
+ size_t byte_offset = reinterpret_cast<const uint8_t*>(addr) - base_;
DCHECK_EQ(byte_offset % static_cast<size_t>(kPageSize), static_cast<size_t>(0));
return byte_offset / kPageSize;
}
// Returns the page map index from an address with rounding.
size_t RoundDownToPageMapIndex(void* addr) const {
- DCHECK(base_ <= addr && addr < reinterpret_cast<byte*>(base_) + capacity_);
+ DCHECK(base_ <= addr && addr < reinterpret_cast<uint8_t*>(base_) + capacity_);
return (reinterpret_cast<uintptr_t>(addr) - reinterpret_cast<uintptr_t>(base_)) / kPageSize;
}
@@ -409,7 +409,7 @@ class RosAlloc {
private:
// The base address of the memory region that's managed by this allocator.
- byte* base_;
+ uint8_t* base_;
// The footprint in bytes of the currently allocated portion of the
// memory region.
@@ -455,7 +455,7 @@ class RosAlloc {
kPageMapLargeObjectPart, // The non-beginning part of a large object.
};
// The table that indicates what pages are currently used for.
- volatile byte* page_map_; // No GUARDED_BY(lock_) for kReadPageMapEntryWithoutLockInBulkFree.
+ volatile uint8_t* page_map_; // No GUARDED_BY(lock_) for kReadPageMapEntryWithoutLockInBulkFree.
size_t page_map_size_;
size_t max_page_map_size_;
std::unique_ptr<MemMap> page_map_mem_map_;
@@ -481,12 +481,12 @@ class RosAlloc {
const size_t page_release_size_threshold_;
// The base address of the memory region that's managed by this allocator.
- byte* Begin() { return base_; }
+ uint8_t* Begin() { return base_; }
// The end address of the memory region that's managed by this allocator.
- byte* End() { return base_ + capacity_; }
+ uint8_t* End() { return base_ + capacity_; }
// Page-granularity alloc/free
- void* AllocPages(Thread* self, size_t num_pages, byte page_map_type)
+ void* AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
// Returns how many bytes were freed.
size_t FreePages(Thread* self, void* ptr, bool already_zero) EXCLUSIVE_LOCKS_REQUIRED(lock_);
@@ -524,7 +524,7 @@ class RosAlloc {
void RevokeThreadUnsafeCurrentRuns();
// Release a range of pages.
- size_t ReleasePageRange(byte* start, byte* end) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ size_t ReleasePageRange(uint8_t* start, uint8_t* end) EXCLUSIVE_LOCKS_REQUIRED(lock_);
public:
RosAlloc(void* base, size_t capacity, size_t max_capacity,
@@ -580,7 +580,7 @@ class RosAlloc {
}
bool IsFreePage(size_t idx) const {
DCHECK_LT(idx, capacity_ / kPageSize);
- byte pm_type = page_map_[idx];
+ uint8_t pm_type = page_map_[idx];
return pm_type == kPageMapReleased || pm_type == kPageMapEmpty;
}
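
A small sketch of the page-map indexing that ToPageMapIndex and the page_map_ array above rely on; kPageSize and the helper names are assumptions for illustration:

#include <cstddef>
#include <cstdint>

// Illustration only; each kPageSize page of the managed region has one uint8_t entry.
constexpr size_t kPageSize = 4096;

inline size_t ToPageMapIndexSketch(const uint8_t* base, const void* addr) {
  const size_t byte_offset = reinterpret_cast<const uint8_t*>(addr) - base;
  return byte_offset / kPageSize;  // one page_map_ entry per managed page
}

inline void* PageFromIndexSketch(uint8_t* base, size_t pm_idx) {
  return base + pm_idx * kPageSize;  // inverse: the run or free-page-run header address
}

This inverse is why FreeInternal above can walk pm_idx backwards through kPageMapRunPart entries until it reaches the page holding the kPageMapRun header.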
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index b3bed64c5e..6691b0f4fc 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -120,7 +120,7 @@ class CalculateObjectForwardingAddressVisitor {
void MarkCompact::CalculateObjectForwardingAddresses() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
// The bump pointer in the space where the next forwarding address will be.
- bump_pointer_ = reinterpret_cast<byte*>(space_->Begin());
+ bump_pointer_ = reinterpret_cast<uint8_t*>(space_->Begin());
// Visit all the marked objects in the bitmap.
CalculateObjectForwardingAddressVisitor visitor(this);
objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index bb85fa0a81..f40e8702d9 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -227,7 +227,7 @@ class MarkCompact : public GarbageCollector {
std::string collector_name_;
// The bump pointer in the space where the next forwarding address will be.
- byte* bump_pointer_;
+ uint8_t* bump_pointer_;
// How many live objects we have in the space.
size_t live_objects_in_space_;
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 930499a2fb..942b556a7e 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -689,7 +689,7 @@ class CardScanTask : public MarkStackTask<false> {
public:
CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
accounting::ContinuousSpaceBitmap* bitmap,
- byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
+ uint8_t* begin, uint8_t* end, uint8_t minimum_age, size_t mark_stack_size,
Object** mark_stack_obj)
: MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
bitmap_(bitmap),
@@ -700,9 +700,9 @@ class CardScanTask : public MarkStackTask<false> {
protected:
accounting::ContinuousSpaceBitmap* const bitmap_;
- byte* const begin_;
- byte* const end_;
- const byte minimum_age_;
+ uint8_t* const begin_;
+ uint8_t* const end_;
+ const uint8_t minimum_age_;
virtual void Finalize() {
delete this;
@@ -730,7 +730,7 @@ size_t MarkSweep::GetThreadCount(bool paused) const {
}
}
-void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
+void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) {
accounting::CardTable* card_table = GetHeap()->GetCardTable();
ThreadPool* thread_pool = GetHeap()->GetThreadPool();
size_t thread_count = GetThreadCount(paused);
@@ -754,8 +754,8 @@ void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
if (space->GetMarkBitmap() == nullptr) {
continue;
}
- byte* card_begin = space->Begin();
- byte* card_end = space->End();
+ uint8_t* card_begin = space->Begin();
+ uint8_t* card_end = space->End();
// Align up the end address. For example, the image space's end
// may not be card-size-aligned.
card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
@@ -910,7 +910,7 @@ mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
return nullptr;
}
-void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
+void MarkSweep::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) {
ScanGrayObjects(paused, minimum_age);
ProcessMarkStack(paused);
}
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 2780099fe2..9ac110d687 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -112,7 +112,7 @@ class MarkSweep : public GarbageCollector {
virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Builds a mark stack with objects on dirty cards and recursively mark until it empties.
- void RecursiveMarkDirtyObjects(bool paused, byte minimum_age)
+ void RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -257,7 +257,7 @@ class MarkSweep : public GarbageCollector {
void PushOnMarkStack(mirror::Object* obj);
// Blackens objects grayed during a garbage collection.
- void ScanGrayObjects(bool paused, byte minimum_age)
+ void ScanGrayObjects(bool paused, uint8_t minimum_age)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index c8fa869a45..9459a3b829 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -437,15 +437,15 @@ static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size
return 0;
}
size_t saved_bytes = 0;
- byte* byte_dest = reinterpret_cast<byte*>(dest);
+ uint8_t* byte_dest = reinterpret_cast<uint8_t*>(dest);
if (kIsDebugBuild) {
for (size_t i = 0; i < size; ++i) {
CHECK_EQ(byte_dest[i], 0U);
}
}
// Process the start of the page. The page must already be dirty, don't bother with checking.
- const byte* byte_src = reinterpret_cast<const byte*>(src);
- const byte* limit = byte_src + size;
+ const uint8_t* byte_src = reinterpret_cast<const uint8_t*>(src);
+ const uint8_t* limit = byte_src + size;
size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
// Copy the bytes until the start of the next page.
memcpy(dest, src, page_remain);
@@ -481,7 +481,7 @@ mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
const size_t object_size = obj->SizeOf();
size_t bytes_allocated;
mirror::Object* forward_address = nullptr;
- if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
+ if (generational_ && reinterpret_cast<uint8_t*>(obj) < last_gc_to_space_end_) {
// If it's allocated before the last GC (older), move
// (pseudo-promote) it to the main free list space (as sort
// of an old generation.)
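
Editorial sketch: CopyAvoidingDirtyingPages above relies on the destination already being zero-filled, so whole pages whose source bytes are all zero can be skipped instead of written, keeping those pages clean. A simplified version of the idea; the real code also handles the unaligned first page, which this sketch ignores, and the 4 KiB page size and names are assumptions:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    constexpr std::size_t kPageSize = 4096;  // illustrative page size

    // dest is assumed to be zero-filled already, so pages whose source bytes are
    // all zero need no memcpy and therefore stay clean. Returns the bytes skipped.
    std::size_t CopySkippingZeroPages(uint8_t* dest, const uint8_t* src, std::size_t size) {
      std::size_t saved = 0;
      for (std::size_t off = 0; off < size; off += kPageSize) {
        std::size_t chunk = std::min(kPageSize, size - off);
        bool all_zero = true;
        for (std::size_t i = 0; i < chunk; ++i) {
          if (src[off + i] != 0) { all_zero = false; break; }
        }
        if (all_zero) {
          saved += chunk;           // nothing to write; the page stays untouched
        } else {
          std::memcpy(dest + off, src + off, chunk);
        }
      }
      return saved;
    }
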
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 71a83f2624..1c4f1e418a 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -228,7 +228,7 @@ class SemiSpace : public GarbageCollector {
// Used for the generational mode. the end/top of the bump
// pointer space at the end of the last collection.
- byte* last_gc_to_space_end_;
+ uint8_t* last_gc_to_space_end_;
// Used for the generational mode. During a collection, keeps track
// of how many bytes of objects have been copied so far from the
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index d672510b1c..b9d69d55c7 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -199,7 +199,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
live_bitmap_.reset(new accounting::HeapBitmap(this));
mark_bitmap_.reset(new accounting::HeapBitmap(this));
// Requested begin for the alloc space, to follow the mapped image and oat files
- byte* requested_alloc_space_begin = nullptr;
+ uint8_t* requested_alloc_space_begin = nullptr;
if (!image_file_name.empty()) {
std::string error_msg;
space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name.c_str(),
@@ -209,7 +209,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
AddSpace(image_space);
// Oat files referenced by image files immediately follow them in memory, ensure alloc space
// isn't going to get in the middle
- byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
+ uint8_t* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
CHECK_GT(oat_file_end_addr, image_space->End());
requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
} else {
@@ -245,7 +245,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
}
std::unique_ptr<MemMap> main_mem_map_1;
std::unique_ptr<MemMap> main_mem_map_2;
- byte* request_begin = requested_alloc_space_begin;
+ uint8_t* request_begin = requested_alloc_space_begin;
if (request_begin != nullptr && separate_non_moving_space) {
request_begin += non_moving_space_capacity;
}
@@ -259,7 +259,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
non_moving_space_capacity, PROT_READ | PROT_WRITE, true, &error_str));
CHECK(non_moving_space_mem_map != nullptr) << error_str;
// Try to reserve virtual memory at a lower address if we have a separate non moving space.
- request_begin = reinterpret_cast<byte*>(300 * MB);
+ request_begin = reinterpret_cast<uint8_t*>(300 * MB);
}
// Attempt to create 2 mem maps at or after the requested begin.
main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin, capacity_,
@@ -350,8 +350,8 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
// Compute heap capacity. Continuous spaces are sorted in order of Begin().
CHECK(!continuous_spaces_.empty());
// Relies on the spaces being sorted.
- byte* heap_begin = continuous_spaces_.front()->Begin();
- byte* heap_end = continuous_spaces_.back()->Limit();
+ uint8_t* heap_begin = continuous_spaces_.front()->Begin();
+ uint8_t* heap_end = continuous_spaces_.back()->Limit();
size_t heap_capacity = heap_end - heap_begin;
// Remove the main backup space since it slows down the GC to have unused extra spaces.
if (main_space_backup_.get() != nullptr) {
@@ -433,7 +433,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
}
}
-MemMap* Heap::MapAnonymousPreferredAddress(const char* name, byte* request_begin, size_t capacity,
+MemMap* Heap::MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin, size_t capacity,
int prot_flags, std::string* out_error_str) {
while (true) {
MemMap* map = MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity,
@@ -2265,7 +2265,7 @@ class VerifyReferenceVisitor {
accounting::CardTable* card_table = heap_->GetCardTable();
accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
accounting::ObjectStack* live_stack = heap_->live_stack_.get();
- byte* card_addr = card_table->CardFromAddr(obj);
+ uint8_t* card_addr = card_table->CardFromAddr(obj);
LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
<< offset << "\n card value = " << static_cast<int>(*card_addr);
if (heap_->IsValidObjectAddress(obj->GetClass())) {
@@ -2295,7 +2295,7 @@ class VerifyReferenceVisitor {
<< ") is not a valid heap address";
}
- card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
+ card_table->CheckAddrIsInCardTable(reinterpret_cast<const uint8_t*>(obj));
void* cover_begin = card_table->AddrFromCard(card_addr);
void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
accounting::CardTable::kCardSize);
@@ -2328,7 +2328,7 @@ class VerifyReferenceVisitor {
}
// Attempt to see if the card table missed the reference.
ScanVisitor scan_visitor;
- byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
+ uint8_t* byte_cover_begin = reinterpret_cast<uint8_t*>(card_table->AddrFromCard(card_addr));
card_table->Scan(bitmap, byte_cover_begin,
byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
}
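
Editorial sketch: the verification hunks above convert between heap addresses and card-table entries with CardFromAddr/AddrFromCard. A toy version of that arithmetic; the shift and the biased-begin layout are assumptions for the example, not taken from the patch:

    #include <cstddef>
    #include <cstdint>

    // Each card-table byte shadows one card of heap, so the conversions are a
    // shift plus a bias.
    struct ToyCardTable {
      static constexpr std::size_t kCardShift = 7;  // 128-byte cards, for the example

      uint8_t* biased_begin;  // card_table_base - (heap_begin >> kCardShift)

      uint8_t* CardFromAddr(const void* addr) const {
        return biased_begin + (reinterpret_cast<uintptr_t>(addr) >> kCardShift);
      }
      void* AddrFromCard(const uint8_t* card) const {
        return reinterpret_cast<void*>(
            static_cast<uintptr_t>(card - biased_begin) << kCardShift);
      }
    };
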
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index faaea4077c..c09dca8c4d 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -94,7 +94,7 @@ namespace space {
class AgeCardVisitor {
public:
- byte operator()(byte card) const {
+ uint8_t operator()(uint8_t card) const {
if (card == accounting::CardTable::kCardDirty) {
return card - 1;
} else {
@@ -625,7 +625,7 @@ class Heap {
void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_);
// Create a mem map with a preferred base address.
- static MemMap* MapAnonymousPreferredAddress(const char* name, byte* request_begin,
+ static MemMap* MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
size_t capacity, int prot_flags,
std::string* out_error_str);
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index e6b5c75f4f..3106b4c913 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -62,7 +62,7 @@ TEST_F(HeapTest, GarbageCollectClassLinkerInit) {
}
TEST_F(HeapTest, HeapBitmapCapacityTest) {
- byte* heap_begin = reinterpret_cast<byte*>(0x1000);
+ uint8_t* heap_begin = reinterpret_cast<uint8_t*>(0x1000);
const size_t heap_capacity = kObjectAlignment * (sizeof(intptr_t) * 8 + 1);
std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap(
accounting::ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
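
Editorial sketch: HeapBitmapCapacityTest sizes the heap so the live bitmap needs just over one word of storage, since each bitmap bit covers kObjectAlignment bytes. The same calculation in stand-alone form, with an assumed 8-byte object alignment:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Assumed object alignment for the example.
      constexpr std::size_t kObjectAlignment = 8;
      constexpr std::size_t kBitsPerWord = sizeof(intptr_t) * 8;

      // Same expression as the test: one object more than a single bitmap word covers.
      std::size_t heap_capacity = kObjectAlignment * (kBitsPerWord + 1);
      std::size_t bitmap_words =
          (heap_capacity / kObjectAlignment + kBitsPerWord - 1) / kBitsPerWord;
      std::printf("capacity=%zu bytes -> %zu bitmap word(s)\n", heap_capacity, bitmap_words);
      return 0;
    }
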
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index ee3c979b9a..9f1f9533d0 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -41,7 +41,7 @@ inline mirror::Object* BumpPointerSpace::AllocThreadUnsafe(Thread* self, size_t
size_t* usable_size) {
Locks::mutator_lock_->AssertExclusiveHeld(self);
num_bytes = RoundUp(num_bytes, kAlignment);
- byte* end = end_.LoadRelaxed();
+ uint8_t* end = end_.LoadRelaxed();
if (end + num_bytes > growth_end_) {
return nullptr;
}
@@ -59,8 +59,8 @@ inline mirror::Object* BumpPointerSpace::AllocThreadUnsafe(Thread* self, size_t
inline mirror::Object* BumpPointerSpace::AllocNonvirtualWithoutAccounting(size_t num_bytes) {
DCHECK(IsAligned<kAlignment>(num_bytes));
- byte* old_end;
- byte* new_end;
+ uint8_t* old_end;
+ uint8_t* new_end;
do {
old_end = end_.LoadRelaxed();
new_end = old_end + num_bytes;
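
Editorial sketch: AllocNonvirtualWithoutAccounting above bumps the end pointer in a CAS loop so multiple threads can allocate without taking a lock. A minimal stand-alone version of that pattern, with growth handling and object construction omitted:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    class ToyBumpPointer {
     public:
      ToyBumpPointer(uint8_t* begin, uint8_t* limit) : end_(begin), growth_end_(limit) {}

      uint8_t* Alloc(std::size_t num_bytes) {
        uint8_t* old_end;
        uint8_t* new_end;
        do {
          old_end = end_.load(std::memory_order_relaxed);
          new_end = old_end + num_bytes;
          if (new_end > growth_end_) {
            return nullptr;  // space exhausted
          }
        } while (!end_.compare_exchange_weak(old_end, new_end, std::memory_order_relaxed));
        return old_end;  // caller places the object at old_end
      }

     private:
      std::atomic<uint8_t*> end_;
      uint8_t* growth_end_;
    };
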
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index fb6bbac6d0..8f42642b17 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -25,7 +25,7 @@ namespace gc {
namespace space {
BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity,
- byte* requested_begin) {
+ uint8_t* requested_begin) {
capacity = RoundUp(capacity, kPageSize);
std::string error_msg;
std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
@@ -42,7 +42,7 @@ BumpPointerSpace* BumpPointerSpace::CreateFromMemMap(const std::string& name, Me
return new BumpPointerSpace(name, mem_map);
}
-BumpPointerSpace::BumpPointerSpace(const std::string& name, byte* begin, byte* limit)
+BumpPointerSpace::BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit)
: ContinuousMemMapAllocSpace(name, nullptr, begin, begin, limit,
kGcRetentionPolicyAlwaysCollect),
growth_end_(limit),
@@ -134,12 +134,12 @@ void BumpPointerSpace::UpdateMainBlock() {
}
// Returns the start of the storage.
-byte* BumpPointerSpace::AllocBlock(size_t bytes) {
+uint8_t* BumpPointerSpace::AllocBlock(size_t bytes) {
bytes = RoundUp(bytes, kAlignment);
if (!num_blocks_) {
UpdateMainBlock();
}
- byte* storage = reinterpret_cast<byte*>(
+ uint8_t* storage = reinterpret_cast<uint8_t*>(
AllocNonvirtualWithoutAccounting(bytes + sizeof(BlockHeader)));
if (LIKELY(storage != nullptr)) {
BlockHeader* header = reinterpret_cast<BlockHeader*>(storage);
@@ -151,9 +151,9 @@ byte* BumpPointerSpace::AllocBlock(size_t bytes) {
}
void BumpPointerSpace::Walk(ObjectCallback* callback, void* arg) {
- byte* pos = Begin();
- byte* end = End();
- byte* main_end = pos;
+ uint8_t* pos = Begin();
+ uint8_t* end = End();
+ uint8_t* main_end = pos;
{
MutexLock mu(Thread::Current(), block_lock_);
// If we have 0 blocks then we need to update the main header since we have bump pointer style
@@ -179,7 +179,7 @@ void BumpPointerSpace::Walk(ObjectCallback* callback, void* arg) {
return;
} else {
callback(obj, arg);
- pos = reinterpret_cast<byte*>(GetNextObject(obj));
+ pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
}
}
// Walk the other blocks (currently only TLABs).
@@ -189,7 +189,7 @@ void BumpPointerSpace::Walk(ObjectCallback* callback, void* arg) {
pos += sizeof(BlockHeader); // Skip the header so that we know where the objects
mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
const mirror::Object* end = reinterpret_cast<const mirror::Object*>(pos + block_size);
- CHECK_LE(reinterpret_cast<const byte*>(end), End());
+ CHECK_LE(reinterpret_cast<const uint8_t*>(end), End());
// We don't know how many objects are allocated in the current block. When we hit a null class
// assume it's the end. TODO: Have a thread update the header when it flushes the block?
while (obj < end && obj->GetClass() != nullptr) {
@@ -250,7 +250,7 @@ void BumpPointerSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
bool BumpPointerSpace::AllocNewTlab(Thread* self, size_t bytes) {
MutexLock mu(Thread::Current(), block_lock_);
RevokeThreadLocalBuffersLocked(self);
- byte* start = AllocBlock(bytes);
+ uint8_t* start = AllocBlock(bytes);
if (start == nullptr) {
return false;
}
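
Editorial sketch: Walk() above iterates TLAB blocks laid out as a small header followed by its payload, skipping sizeof(BlockHeader) to reach the objects. A simplified walker over that layout; the header field name is an assumption:

    #include <cstddef>
    #include <cstdint>

    struct BlockHeader {
      std::size_t size_;  // payload bytes after the header
    };

    void WalkBlocks(uint8_t* pos, uint8_t* end,
                    void (*callback)(uint8_t* payload, std::size_t size)) {
      while (pos < end) {
        auto* header = reinterpret_cast<BlockHeader*>(pos);
        std::size_t block_size = header->size_;
        pos += sizeof(BlockHeader);   // skip the header; objects start here
        callback(pos, block_size);
        pos += block_size;            // next block header
      }
    }
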
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 71b15baff1..98a3189f1f 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -42,7 +42,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
// Create a bump pointer space with the requested sizes. The requested base address is not
// guaranteed to be granted, if it is required, the caller should call Begin on the returned
// space to confirm the request was granted.
- static BumpPointerSpace* Create(const std::string& name, size_t capacity, byte* requested_begin);
+ static BumpPointerSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap* mem_map);
// Allocate num_bytes, returns nullptr if the space is full.
@@ -121,12 +121,12 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
}
bool Contains(const mirror::Object* obj) const {
- const byte* byte_obj = reinterpret_cast<const byte*>(obj);
+ const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
return byte_obj >= Begin() && byte_obj < End();
}
// TODO: Change this? Mainly used for compacting to a particular region of memory.
- BumpPointerSpace(const std::string& name, byte* begin, byte* limit);
+ BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit);
// Return the object which comes after obj, while ensuring alignment.
static mirror::Object* GetNextObject(mirror::Object* obj)
@@ -161,7 +161,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
BumpPointerSpace(const std::string& name, MemMap* mem_map);
// Allocate a raw block of bytes.
- byte* AllocBlock(size_t bytes) EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
+ uint8_t* AllocBlock(size_t bytes) EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
void RevokeThreadLocalBuffersLocked(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
// The main block is an unbounded block where objects go when there are no other blocks. This
@@ -169,7 +169,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
// allocation. The main block starts at the space Begin().
void UpdateMainBlock() EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
- byte* growth_end_;
+ uint8_t* growth_end_;
AtomicInteger objects_allocated_; // Accumulated from revoked thread local regions.
AtomicInteger bytes_allocated_; // Accumulated from revoked thread local regions.
Mutex block_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 456d1b31e2..d2d95b4c7b 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -35,8 +35,8 @@ static constexpr bool kPrefetchDuringDlMallocFreeList = true;
template class ValgrindMallocSpace<DlMallocSpace, void*>;
-DlMallocSpace::DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
- byte* end, byte* limit, size_t growth_limit,
+DlMallocSpace::DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, uint8_t* begin,
+ uint8_t* end, uint8_t* limit, size_t growth_limit,
bool can_move_objects, size_t starting_size,
size_t initial_size)
: MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
@@ -57,13 +57,13 @@ DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap* mem_map, const std::strin
}
// Protect memory beyond the starting size. morecore will add r/w permissions when necessary
- byte* end = mem_map->Begin() + starting_size;
+ uint8_t* end = mem_map->Begin() + starting_size;
if (capacity - starting_size > 0) {
CHECK_MEMORY_CALL(mprotect, (end, capacity - starting_size, PROT_NONE), name);
}
// Everything is set so record in immutable structure and leave
- byte* begin = mem_map->Begin();
+ uint8_t* begin = mem_map->Begin();
if (Runtime::Current()->RunningOnValgrind()) {
return new ValgrindMallocSpace<DlMallocSpace, void*>(
name, mem_map, mspace, begin, end, begin + capacity, growth_limit, initial_size,
@@ -75,7 +75,7 @@ DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap* mem_map, const std::strin
}
DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_size,
- size_t growth_limit, size_t capacity, byte* requested_begin,
+ size_t growth_limit, size_t capacity, uint8_t* requested_begin,
bool can_move_objects) {
uint64_t start_time = 0;
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
@@ -149,8 +149,8 @@ mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
}
MallocSpace* DlMallocSpace::CreateInstance(const std::string& name, MemMap* mem_map,
- void* allocator, byte* begin, byte* end,
- byte* limit, size_t growth_limit,
+ void* allocator, uint8_t* begin, uint8_t* end,
+ uint8_t* limit, size_t growth_limit,
bool can_move_objects) {
return new DlMallocSpace(name, mem_map, allocator, begin, end, limit, growth_limit,
can_move_objects, starting_size_, initial_size_);
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 7aff14b665..3b8065e5c4 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -44,7 +44,7 @@ class DlMallocSpace : public MallocSpace {
// the caller should call Begin on the returned space to confirm the
// request was granted.
static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, byte* requested_begin, bool can_move_objects);
+ size_t capacity, uint8_t* requested_begin, bool can_move_objects);
// Virtual to allow ValgrindMallocSpace to intercept.
virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -108,7 +108,7 @@ class DlMallocSpace : public MallocSpace {
void SetFootprintLimit(size_t limit) OVERRIDE;
MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
- byte* begin, byte* end, byte* limit, size_t growth_limit,
+ uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
bool can_move_objects);
uint64_t GetBytesAllocated() OVERRIDE;
@@ -128,8 +128,8 @@ class DlMallocSpace : public MallocSpace {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
protected:
- DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin, byte* end,
- byte* limit, size_t growth_limit, bool can_move_objects, size_t starting_size,
+ DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, uint8_t* begin, uint8_t* end,
+ uint8_t* limit, size_t growth_limit, bool can_move_objects, size_t starting_size,
size_t initial_size);
private:
@@ -144,7 +144,7 @@ class DlMallocSpace : public MallocSpace {
static void* CreateMspace(void* base, size_t morecore_start, size_t initial_size);
// The boundary tag overhead.
- static const size_t kChunkOverhead = kWordSize;
+ static const size_t kChunkOverhead = sizeof(intptr_t);
// Underlying malloc space.
void* mspace_;
diff --git a/runtime/gc/space/dlmalloc_space_base_test.cc b/runtime/gc/space/dlmalloc_space_base_test.cc
index 02fc4a52e1..93fe1559a0 100644
--- a/runtime/gc/space/dlmalloc_space_base_test.cc
+++ b/runtime/gc/space/dlmalloc_space_base_test.cc
@@ -24,7 +24,7 @@ namespace gc {
namespace space {
MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, byte* requested_begin) {
+ size_t capacity, uint8_t* requested_begin) {
return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, false);
}
diff --git a/runtime/gc/space/dlmalloc_space_random_test.cc b/runtime/gc/space/dlmalloc_space_random_test.cc
index 4b1a1b1cc0..f9b41daad8 100644
--- a/runtime/gc/space/dlmalloc_space_random_test.cc
+++ b/runtime/gc/space/dlmalloc_space_random_test.cc
@@ -23,7 +23,7 @@ namespace gc {
namespace space {
MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, byte* requested_begin) {
+ size_t capacity, uint8_t* requested_begin) {
return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, false);
}
diff --git a/runtime/gc/space/dlmalloc_space_static_test.cc b/runtime/gc/space/dlmalloc_space_static_test.cc
index d17d0a7d54..5758e0cde9 100644
--- a/runtime/gc/space/dlmalloc_space_static_test.cc
+++ b/runtime/gc/space/dlmalloc_space_static_test.cc
@@ -23,7 +23,7 @@ namespace gc {
namespace space {
MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, byte* requested_begin) {
+ size_t capacity, uint8_t* requested_begin) {
return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, false);
}
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 59630fe71c..452af90750 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -526,7 +526,7 @@ ImageSpace* ImageSpace::Create(const char* image_location,
}
void ImageSpace::VerifyImageAllocations() {
- byte* current = Begin() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
+ uint8_t* current = Begin() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
while (current < End()) {
DCHECK_ALIGNED(current, kObjectAlignment);
mirror::Object* obj = reinterpret_cast<mirror::Object*>(current);
@@ -595,7 +595,7 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat
bitmap_index));
std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap(
accounting::ContinuousSpaceBitmap::CreateFromMemMap(bitmap_name, image_map.release(),
- reinterpret_cast<byte*>(map->Begin()),
+ reinterpret_cast<uint8_t*>(map->Begin()),
map->Size()));
if (bitmap.get() == nullptr) {
*error_msg = StringPrintf("Could not create bitmap '%s'", bitmap_name.c_str());
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index dad5855748..9434bfe91c 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -45,7 +45,7 @@ class ValgrindLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>(
reinterpret_cast<uintptr_t>(obj) + kValgrindRedZoneBytes);
VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<void*>(obj), kValgrindRedZoneBytes);
- VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(object_without_rdz) + num_bytes,
+ VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<uint8_t*>(object_without_rdz) + num_bytes,
kValgrindRedZoneBytes);
if (usable_size != nullptr) {
*usable_size = num_bytes; // Since we have redzones, shrink the usable size.
@@ -84,7 +84,7 @@ void LargeObjectSpace::SwapBitmaps() {
mark_bitmap_->SetName(temp_name);
}
-LargeObjectSpace::LargeObjectSpace(const std::string& name, byte* begin, byte* end)
+LargeObjectSpace::LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end)
: DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
total_objects_allocated_(0), begin_(begin), end_(end) {
@@ -122,8 +122,8 @@ mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
mem_maps_.Put(obj, mem_map);
const size_t allocation_size = mem_map->BaseSize();
DCHECK(bytes_allocated != nullptr);
- begin_ = std::min(begin_, reinterpret_cast<byte*>(obj));
- byte* obj_end = reinterpret_cast<byte*>(obj) + allocation_size;
+ begin_ = std::min(begin_, reinterpret_cast<uint8_t*>(obj));
+ uint8_t* obj_end = reinterpret_cast<uint8_t*>(obj) + allocation_size;
if (end_ == nullptr || obj_end > end_) {
end_ = obj_end;
}
@@ -283,7 +283,7 @@ inline bool FreeListSpace::SortByPrevFree::operator()(const AllocationInfo* a,
return reinterpret_cast<uintptr_t>(a) < reinterpret_cast<uintptr_t>(b);
}
-FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_begin, size_t size) {
+FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested_begin, size_t size) {
CHECK_EQ(size % kAlignment, 0U);
std::string error_msg;
MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
@@ -292,7 +292,7 @@ FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_be
return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
}
-FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end)
+FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end)
: LargeObjectSpace(name, begin, end),
mem_map_(mem_map),
lock_("free list space lock", kAllocSpaceLock) {
@@ -319,8 +319,8 @@ void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
while (cur_info < end_info) {
if (!cur_info->IsFree()) {
size_t alloc_size = cur_info->ByteSize();
- byte* byte_start = reinterpret_cast<byte*>(GetAddressForAllocationInfo(cur_info));
- byte* byte_end = byte_start + alloc_size;
+ uint8_t* byte_start = reinterpret_cast<uint8_t*>(GetAddressForAllocationInfo(cur_info));
+ uint8_t* byte_end = byte_start + alloc_size;
callback(byte_start, byte_end, alloc_size, arg);
callback(nullptr, nullptr, 0, arg);
}
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index a63c5c0aae..850a0066c5 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -77,11 +77,11 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
return false;
}
// Current address at which the space begins, which may vary as the space is filled.
- byte* Begin() const {
+ uint8_t* Begin() const {
return begin_;
}
// Current address at which the space ends, which may vary as the space is filled.
- byte* End() const {
+ uint8_t* End() const {
return end_;
}
// Current size of space
@@ -90,14 +90,14 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
}
// Return true if we contain the specified address.
bool Contains(const mirror::Object* obj) const {
- const byte* byte_obj = reinterpret_cast<const byte*>(obj);
+ const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
return Begin() <= byte_obj && byte_obj < End();
}
void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
protected:
- explicit LargeObjectSpace(const std::string& name, byte* begin, byte* end);
+ explicit LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end);
static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);
// Approximate number of bytes which have been allocated into the space.
@@ -106,8 +106,8 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
uint64_t total_bytes_allocated_;
uint64_t total_objects_allocated_;
// Begin and end, may change as more large objects are allocated.
- byte* begin_;
- byte* end_;
+ uint8_t* begin_;
+ uint8_t* end_;
friend class Space;
@@ -149,7 +149,7 @@ class FreeListSpace FINAL : public LargeObjectSpace {
static constexpr size_t kAlignment = kPageSize;
virtual ~FreeListSpace();
- static FreeListSpace* Create(const std::string& name, byte* requested_begin, size_t capacity);
+ static FreeListSpace* Create(const std::string& name, uint8_t* requested_begin, size_t capacity);
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
EXCLUSIVE_LOCKS_REQUIRED(lock_);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -159,7 +159,7 @@ class FreeListSpace FINAL : public LargeObjectSpace {
void Dump(std::ostream& os) const;
protected:
- FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end);
+ FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end);
size_t GetSlotIndexForAddress(uintptr_t address) const {
DCHECK(Contains(reinterpret_cast<mirror::Object*>(address)));
return (address - reinterpret_cast<uintptr_t>(Begin())) / kAlignment;
diff --git a/runtime/gc/space/large_object_space_test.cc b/runtime/gc/space/large_object_space_test.cc
index c5d8abca40..e17bad8a14 100644
--- a/runtime/gc/space/large_object_space_test.cc
+++ b/runtime/gc/space/large_object_space_test.cc
@@ -55,7 +55,7 @@ void LargeObjectSpaceTest::LargeObjectTest() {
ASSERT_EQ(allocation_size, los->AllocationSize(obj, nullptr));
ASSERT_GE(allocation_size, request_size);
// Fill in our magic value.
- byte magic = (request_size & 0xFF) | 1;
+ uint8_t magic = (request_size & 0xFF) | 1;
memset(obj, magic, request_size);
requests.push_back(std::make_pair(obj, request_size));
}
@@ -73,9 +73,9 @@ void LargeObjectSpaceTest::LargeObjectTest() {
mirror::Object* obj = requests.back().first;
size_t request_size = requests.back().second;
requests.pop_back();
- byte magic = (request_size & 0xFF) | 1;
+ uint8_t magic = (request_size & 0xFF) | 1;
for (size_t k = 0; k < request_size; ++k) {
- ASSERT_EQ(reinterpret_cast<const byte*>(obj)[k], magic);
+ ASSERT_EQ(reinterpret_cast<const uint8_t*>(obj)[k], magic);
}
ASSERT_GE(los->Free(Thread::Current(), obj), request_size);
}
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index ba7e5c1eca..9d1fbbe3dc 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -36,7 +36,7 @@ namespace space {
size_t MallocSpace::bitmap_index_ = 0;
MallocSpace::MallocSpace(const std::string& name, MemMap* mem_map,
- byte* begin, byte* end, byte* limit, size_t growth_limit,
+ uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
bool create_bitmaps, bool can_move_objects, size_t starting_size,
size_t initial_size)
: ContinuousMemMapAllocSpace(name, mem_map, begin, end, limit, kGcRetentionPolicyAlwaysCollect),
@@ -66,7 +66,7 @@ MallocSpace::MallocSpace(const std::string& name, MemMap* mem_map,
}
MemMap* MallocSpace::CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size,
- size_t* growth_limit, size_t* capacity, byte* requested_begin) {
+ size_t* growth_limit, size_t* capacity, uint8_t* requested_begin) {
// Sanity check arguments
if (starting_size > *initial_size) {
*initial_size = starting_size;
@@ -129,10 +129,10 @@ void MallocSpace::SetGrowthLimit(size_t growth_limit) {
void* MallocSpace::MoreCore(intptr_t increment) {
CheckMoreCoreForPrecondition();
- byte* original_end = End();
+ uint8_t* original_end = End();
if (increment != 0) {
VLOG(heap) << "MallocSpace::MoreCore " << PrettySize(increment);
- byte* new_end = original_end + increment;
+ uint8_t* new_end = original_end + increment;
if (increment > 0) {
// Should never be asked to increase the allocation beyond the capacity of the space. Enforced
// by mspace_set_footprint_limit.
@@ -163,7 +163,7 @@ ZygoteSpace* MallocSpace::CreateZygoteSpace(const char* alloc_space_name, bool l
// alloc space so that we won't mix thread local runs from different
// alloc spaces.
RevokeAllThreadLocalBuffers();
- SetEnd(reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(End()), kPageSize)));
+ SetEnd(reinterpret_cast<uint8_t*>(RoundUp(reinterpret_cast<uintptr_t>(End()), kPageSize)));
DCHECK(IsAligned<accounting::CardTable::kCardSize>(begin_));
DCHECK(IsAligned<accounting::CardTable::kCardSize>(End()));
DCHECK(IsAligned<kPageSize>(begin_));
@@ -194,7 +194,7 @@ ZygoteSpace* MallocSpace::CreateZygoteSpace(const char* alloc_space_name, bool l
void* allocator = CreateAllocator(End(), starting_size_, initial_size_, capacity,
low_memory_mode);
// Protect memory beyond the initial size.
- byte* end = mem_map->Begin() + starting_size_;
+ uint8_t* end = mem_map->Begin() + starting_size_;
if (capacity > initial_size_) {
CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size_, PROT_NONE), alloc_space_name);
}
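
Editorial sketch: both the dlmalloc and rosalloc creation paths reserve the full capacity up front and mprotect everything past the starting size, so MoreCore above can grow the space later simply by re-enabling access. A stand-alone illustration of that reservation pattern, with example sizes:

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const std::size_t capacity = 1 << 20;        // 1 MiB reservation (example)
      const std::size_t starting_size = 64 << 10;  // 64 KiB usable at first (example)

      void* base = mmap(nullptr, capacity, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (base == MAP_FAILED) { std::perror("mmap"); return 1; }

      uint8_t* end = static_cast<uint8_t*>(base) + starting_size;
      if (mprotect(end, capacity - starting_size, PROT_NONE) != 0) {
        std::perror("mprotect"); return 1;
      }
      // Growing by `increment` bytes later would be:
      //   mprotect(end, increment, PROT_READ | PROT_WRITE);
      munmap(base, capacity);
      return 0;
    }
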
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index bace3f6e63..7230116106 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -115,7 +115,7 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
void SetGrowthLimit(size_t growth_limit);
virtual MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
- byte* begin, byte* end, byte* limit, size_t growth_limit,
+ uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
bool can_move_objects) = 0;
// Splits ourself into a zygote space and new malloc space which has our unused memory. When true,
@@ -138,12 +138,12 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
}
protected:
- MallocSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end,
- byte* limit, size_t growth_limit, bool create_bitmaps, bool can_move_objects,
+ MallocSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end,
+ uint8_t* limit, size_t growth_limit, bool create_bitmaps, bool can_move_objects,
size_t starting_size, size_t initial_size);
static MemMap* CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size,
- size_t* growth_limit, size_t* capacity, byte* requested_begin);
+ size_t* growth_limit, size_t* capacity, uint8_t* requested_begin);
// When true the low memory mode argument specifies that the heap wishes the created allocator to
// be more aggressive in releasing unused pages.
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 3f39c7707b..d25694ad24 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -42,8 +42,8 @@ static constexpr bool kVerifyFreedBytes = false;
// template class ValgrindMallocSpace<RosAllocSpace, allocator::RosAlloc*>;
RosAllocSpace::RosAllocSpace(const std::string& name, MemMap* mem_map,
- art::gc::allocator::RosAlloc* rosalloc, byte* begin, byte* end,
- byte* limit, size_t growth_limit, bool can_move_objects,
+ art::gc::allocator::RosAlloc* rosalloc, uint8_t* begin, uint8_t* end,
+ uint8_t* limit, size_t growth_limit, bool can_move_objects,
size_t starting_size, size_t initial_size, bool low_memory_mode)
: MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
starting_size, initial_size),
@@ -64,13 +64,13 @@ RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap* mem_map, const std::strin
}
// Protect memory beyond the starting size. MoreCore will add r/w permissions when necessary
- byte* end = mem_map->Begin() + starting_size;
+ uint8_t* end = mem_map->Begin() + starting_size;
if (capacity - starting_size > 0) {
CHECK_MEMORY_CALL(mprotect, (end, capacity - starting_size, PROT_NONE), name);
}
// Everything is set so record in immutable structure and leave
- byte* begin = mem_map->Begin();
+ uint8_t* begin = mem_map->Begin();
// TODO: Fix RosAllocSpace to support valgrind. There is currently some issues with
// AllocationSize caused by redzones. b/12944686
if (false && Runtime::Current()->GetHeap()->RunningOnValgrind()) {
@@ -86,7 +86,7 @@ RosAllocSpace::~RosAllocSpace() {
}
RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_size,
- size_t growth_limit, size_t capacity, byte* requested_begin,
+ size_t growth_limit, size_t capacity, uint8_t* requested_begin,
bool low_memory_mode, bool can_move_objects) {
uint64_t start_time = 0;
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
@@ -164,7 +164,7 @@ mirror::Object* RosAllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
}
MallocSpace* RosAllocSpace::CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
- byte* begin, byte* end, byte* limit, size_t growth_limit,
+ uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
bool can_move_objects) {
return new RosAllocSpace(name, mem_map, reinterpret_cast<allocator::RosAlloc*>(allocator),
begin, end, limit, growth_limit, can_move_objects, starting_size_,
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index f1ce115bde..46fffaad24 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -39,7 +39,7 @@ class RosAllocSpace : public MallocSpace {
// the caller should call Begin on the returned space to confirm the
// request was granted.
static RosAllocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, byte* requested_begin, bool low_memory_mode,
+ size_t capacity, uint8_t* requested_begin, bool low_memory_mode,
bool can_move_objects);
static RosAllocSpace* CreateFromMemMap(MemMap* mem_map, const std::string& name,
size_t starting_size, size_t initial_size,
@@ -93,7 +93,7 @@ class RosAllocSpace : public MallocSpace {
void Clear() OVERRIDE;
MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
- byte* begin, byte* end, byte* limit, size_t growth_limit,
+ uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
bool can_move_objects) OVERRIDE;
uint64_t GetBytesAllocated() OVERRIDE;
@@ -127,7 +127,7 @@ class RosAllocSpace : public MallocSpace {
protected:
RosAllocSpace(const std::string& name, MemMap* mem_map, allocator::RosAlloc* rosalloc,
- byte* begin, byte* end, byte* limit, size_t growth_limit, bool can_move_objects,
+ uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit, bool can_move_objects,
size_t starting_size, size_t initial_size, bool low_memory_mode);
private:
diff --git a/runtime/gc/space/rosalloc_space_base_test.cc b/runtime/gc/space/rosalloc_space_base_test.cc
index c3157fa7d7..0c5be03180 100644
--- a/runtime/gc/space/rosalloc_space_base_test.cc
+++ b/runtime/gc/space/rosalloc_space_base_test.cc
@@ -21,7 +21,7 @@ namespace gc {
namespace space {
MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, byte* requested_begin) {
+ size_t capacity, uint8_t* requested_begin) {
return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin,
Runtime::Current()->GetHeap()->IsLowMemoryMode(), false);
}
diff --git a/runtime/gc/space/rosalloc_space_random_test.cc b/runtime/gc/space/rosalloc_space_random_test.cc
index 864bbc9968..ca3aff47f0 100644
--- a/runtime/gc/space/rosalloc_space_random_test.cc
+++ b/runtime/gc/space/rosalloc_space_random_test.cc
@@ -21,7 +21,7 @@ namespace gc {
namespace space {
MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, byte* requested_begin) {
+ size_t capacity, uint8_t* requested_begin) {
return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin,
Runtime::Current()->GetHeap()->IsLowMemoryMode(), false);
}
diff --git a/runtime/gc/space/rosalloc_space_static_test.cc b/runtime/gc/space/rosalloc_space_static_test.cc
index c0e2ac8a09..a78623e593 100644
--- a/runtime/gc/space/rosalloc_space_static_test.cc
+++ b/runtime/gc/space/rosalloc_space_static_test.cc
@@ -21,7 +21,7 @@ namespace gc {
namespace space {
MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, byte* requested_begin) {
+ size_t capacity, uint8_t* requested_begin) {
return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin,
Runtime::Current()->GetHeap()->IsLowMemoryMode(), false);
}
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 523d4fe8fd..860a4c9f39 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -246,27 +246,27 @@ class AllocSpace {
class ContinuousSpace : public Space {
public:
// Address at which the space begins.
- byte* Begin() const {
+ uint8_t* Begin() const {
return begin_;
}
// Current address at which the space ends, which may vary as the space is filled.
- byte* End() const {
+ uint8_t* End() const {
return end_.LoadRelaxed();
}
// The end of the address range covered by the space.
- byte* Limit() const {
+ uint8_t* Limit() const {
return limit_;
}
// Change the end of the space. Be careful with use since changing the end of a space to an
// invalid value may break the GC.
- void SetEnd(byte* end) {
+ void SetEnd(uint8_t* end) {
end_.StoreRelaxed(end);
}
- void SetLimit(byte* limit) {
+ void SetLimit(uint8_t* limit) {
limit_ = limit;
}
@@ -286,7 +286,7 @@ class ContinuousSpace : public Space {
// Is object within this space? We check to see if the pointer is beyond the end first as
// continuous spaces are iterated over from low to high.
bool HasAddress(const mirror::Object* obj) const {
- const byte* byte_ptr = reinterpret_cast<const byte*>(obj);
+ const uint8_t* byte_ptr = reinterpret_cast<const uint8_t*>(obj);
return byte_ptr >= Begin() && byte_ptr < Limit();
}
@@ -302,18 +302,18 @@ class ContinuousSpace : public Space {
protected:
ContinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy,
- byte* begin, byte* end, byte* limit) :
+ uint8_t* begin, uint8_t* end, uint8_t* limit) :
Space(name, gc_retention_policy), begin_(begin), end_(end), limit_(limit) {
}
// The beginning of the storage for fast access.
- byte* begin_;
+ uint8_t* begin_;
// Current end of the space.
- Atomic<byte*> end_;
+ Atomic<uint8_t*> end_;
// Limit of the space.
- byte* limit_;
+ uint8_t* limit_;
private:
DISALLOW_COPY_AND_ASSIGN(ContinuousSpace);
@@ -369,7 +369,7 @@ class MemMapSpace : public ContinuousSpace {
}
protected:
- MemMapSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end, byte* limit,
+ MemMapSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end, uint8_t* limit,
GcRetentionPolicy gc_retention_policy)
: ContinuousSpace(name, gc_retention_policy, begin, end, limit),
mem_map_(mem_map) {
@@ -425,8 +425,8 @@ class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
std::unique_ptr<accounting::ContinuousSpaceBitmap> mark_bitmap_;
std::unique_ptr<accounting::ContinuousSpaceBitmap> temp_bitmap_;
- ContinuousMemMapAllocSpace(const std::string& name, MemMap* mem_map, byte* begin,
- byte* end, byte* limit, GcRetentionPolicy gc_retention_policy)
+ ContinuousMemMapAllocSpace(const std::string& name, MemMap* mem_map, uint8_t* begin,
+ uint8_t* end, uint8_t* limit, GcRetentionPolicy gc_retention_policy)
: MemMapSpace(name, mem_map, begin, end, limit, gc_retention_policy) {
}
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 7211bb473c..9f39b80c9a 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -110,7 +110,7 @@ class SpaceTest : public CommonRuntimeTest {
}
typedef MallocSpace* (*CreateSpaceFn)(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, byte* requested_begin);
+ size_t capacity, uint8_t* requested_begin);
void InitTestBody(CreateSpaceFn create_space);
void ZygoteSpaceTestBody(CreateSpaceFn create_space);
void AllocAndFreeTestBody(CreateSpaceFn create_space);
diff --git a/runtime/gc/space/valgrind_malloc_space-inl.h b/runtime/gc/space/valgrind_malloc_space-inl.h
index 966c276f42..a6b837c09b 100644
--- a/runtime/gc/space/valgrind_malloc_space-inl.h
+++ b/runtime/gc/space/valgrind_malloc_space-inl.h
@@ -39,10 +39,10 @@ mirror::Object* ValgrindMallocSpace<S, A>::AllocWithGrowth(Thread* self, size_t
return nullptr;
}
mirror::Object* result = reinterpret_cast<mirror::Object*>(
- reinterpret_cast<byte*>(obj_with_rdz) + kValgrindRedZoneBytes);
+ reinterpret_cast<uint8_t*>(obj_with_rdz) + kValgrindRedZoneBytes);
// Mark redzones as no access.
VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
- VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(result) + num_bytes, kValgrindRedZoneBytes);
+ VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<uint8_t*>(result) + num_bytes, kValgrindRedZoneBytes);
return result;
}
@@ -56,24 +56,24 @@ mirror::Object* ValgrindMallocSpace<S, A>::Alloc(Thread* self, size_t num_bytes,
return nullptr;
}
mirror::Object* result = reinterpret_cast<mirror::Object*>(
- reinterpret_cast<byte*>(obj_with_rdz) + kValgrindRedZoneBytes);
+ reinterpret_cast<uint8_t*>(obj_with_rdz) + kValgrindRedZoneBytes);
// Mark redzones as no access.
VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
- VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(result) + num_bytes, kValgrindRedZoneBytes);
+ VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<uint8_t*>(result) + num_bytes, kValgrindRedZoneBytes);
return result;
}
template <typename S, typename A>
size_t ValgrindMallocSpace<S, A>::AllocationSize(mirror::Object* obj, size_t* usable_size) {
size_t result = S::AllocationSize(reinterpret_cast<mirror::Object*>(
- reinterpret_cast<byte*>(obj) - kValgrindRedZoneBytes), usable_size);
+ reinterpret_cast<uint8_t*>(obj) - kValgrindRedZoneBytes), usable_size);
return result;
}
template <typename S, typename A>
size_t ValgrindMallocSpace<S, A>::Free(Thread* self, mirror::Object* ptr) {
void* obj_after_rdz = reinterpret_cast<void*>(ptr);
- void* obj_with_rdz = reinterpret_cast<byte*>(obj_after_rdz) - kValgrindRedZoneBytes;
+ void* obj_with_rdz = reinterpret_cast<uint8_t*>(obj_after_rdz) - kValgrindRedZoneBytes;
// Make redzones undefined.
size_t usable_size = 0;
AllocationSize(ptr, &usable_size);
@@ -93,8 +93,8 @@ size_t ValgrindMallocSpace<S, A>::FreeList(Thread* self, size_t num_ptrs, mirror
template <typename S, typename A>
ValgrindMallocSpace<S, A>::ValgrindMallocSpace(const std::string& name, MemMap* mem_map,
- A allocator, byte* begin,
- byte* end, byte* limit, size_t growth_limit,
+ A allocator, uint8_t* begin,
+ uint8_t* end, uint8_t* limit, size_t growth_limit,
size_t initial_size,
bool can_move_objects, size_t starting_size) :
S(name, mem_map, allocator, begin, end, limit, growth_limit, can_move_objects, starting_size,
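
Editorial sketch: ValgrindMallocSpace above pads every allocation with red zones, hands the caller a pointer just past the front zone, and walks back on free. A simplified version of the offset bookkeeping; the Valgrind client requests appear only as comments and the red-zone size is an assumption:

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    constexpr std::size_t kRedZoneBytes = 16;  // illustrative size

    void* AllocWithRedZones(std::size_t num_bytes) {
      uint8_t* with_rdz = static_cast<uint8_t*>(
          std::malloc(num_bytes + 2 * kRedZoneBytes));
      if (with_rdz == nullptr) return nullptr;
      // VALGRIND_MAKE_MEM_NOACCESS(with_rdz, kRedZoneBytes);
      // VALGRIND_MAKE_MEM_NOACCESS(with_rdz + kRedZoneBytes + num_bytes, kRedZoneBytes);
      return with_rdz + kRedZoneBytes;          // what the caller sees
    }

    void FreeWithRedZones(void* ptr) {
      uint8_t* with_rdz = static_cast<uint8_t*>(ptr) - kRedZoneBytes;
      // The real code marks the region undefined again before releasing it.
      std::free(with_rdz);
    }
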
diff --git a/runtime/gc/space/valgrind_malloc_space.h b/runtime/gc/space/valgrind_malloc_space.h
index 200ad83667..eb6fe9c32b 100644
--- a/runtime/gc/space/valgrind_malloc_space.h
+++ b/runtime/gc/space/valgrind_malloc_space.h
@@ -47,7 +47,7 @@ class ValgrindMallocSpace FINAL : public BaseMallocSpaceType {
}
ValgrindMallocSpace(const std::string& name, MemMap* mem_map, AllocatorType allocator,
- byte* begin, byte* end, byte* limit, size_t growth_limit,
+ uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
size_t initial_size, bool can_move_objects, size_t starting_size);
virtual ~ValgrindMallocSpace() {}
diff --git a/runtime/globals.h b/runtime/globals.h
index 107e0646a8..b7bd44d7c9 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -24,22 +24,14 @@
namespace art {
-typedef uint8_t byte;
-typedef intptr_t word;
-typedef uintptr_t uword;
-
static constexpr size_t KB = 1024;
static constexpr size_t MB = KB * KB;
static constexpr size_t GB = KB * KB * KB;
// Runtime sizes.
-static constexpr size_t kWordSize = sizeof(word);
-static constexpr size_t kPointerSize = sizeof(void*);
-
static constexpr size_t kBitsPerByte = 8;
static constexpr size_t kBitsPerByteLog2 = 3;
-static constexpr int kBitsPerWord = kWordSize * kBitsPerByte;
-static constexpr size_t kWordHighBitMask = static_cast<size_t>(1) << (kBitsPerWord - 1);
+static constexpr int kBitsPerIntPtrT = sizeof(intptr_t) * kBitsPerByte;
// Required stack alignment
static constexpr size_t kStackAlignment = 16;
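
Editorial note: the aliases removed from globals.h above are replaced throughout this patch by standard <cstdint> types and sizeof expressions. A compile-time summary of the mapping, for reference:

    #include <cstddef>
    #include <cstdint>

    // The deleted aliases and their replacements, as applied across this change:
    //   byte          -> uint8_t
    //   word          -> intptr_t
    //   uword         -> uintptr_t
    //   kWordSize     -> sizeof(intptr_t)
    //   kPointerSize  -> sizeof(void*)
    //   kBitsPerWord  -> kBitsPerIntPtrT (sizeof(intptr_t) * kBitsPerByte)
    static_assert(sizeof(uint8_t) == 1, "byte replacement is one byte");
    static_assert(sizeof(intptr_t) >= sizeof(void*),
                  "intptr_t can hold a pointer, as kWordSize users assumed");
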
diff --git a/runtime/handle_scope_test.cc b/runtime/handle_scope_test.cc
index de563c1dec..7afd279942 100644
--- a/runtime/handle_scope_test.cc
+++ b/runtime/handle_scope_test.cc
@@ -46,17 +46,17 @@ TEST(HandleScopeTest, Offsets) NO_THREAD_SAFETY_ANALYSIS {
test_table.SetLink(reinterpret_cast<HandleScope*>(0x5678));
test_table.SetNumberOfReferences(0x9ABC);
- byte* table_base_ptr = reinterpret_cast<byte*>(&test_table);
+ uint8_t* table_base_ptr = reinterpret_cast<uint8_t*>(&test_table);
{
uintptr_t* link_ptr = reinterpret_cast<uintptr_t*>(table_base_ptr +
- HandleScope::LinkOffset(kPointerSize));
+ HandleScope::LinkOffset(sizeof(void*)));
EXPECT_EQ(*link_ptr, static_cast<size_t>(0x5678));
}
{
uint32_t* num_ptr = reinterpret_cast<uint32_t*>(table_base_ptr +
- HandleScope::NumberOfReferencesOffset(kPointerSize));
+ HandleScope::NumberOfReferencesOffset(sizeof(void*)));
EXPECT_EQ(*num_ptr, static_cast<size_t>(0x9ABC));
}
@@ -66,7 +66,7 @@ TEST(HandleScopeTest, Offsets) NO_THREAD_SAFETY_ANALYSIS {
EXPECT_EQ(sizeof(StackReference<mirror::Object>), sizeof(uint32_t));
uint32_t* ref_ptr = reinterpret_cast<uint32_t*>(table_base_ptr +
- HandleScope::ReferencesOffset(kPointerSize));
+ HandleScope::ReferencesOffset(sizeof(void*)));
EXPECT_EQ(*ref_ptr, static_cast<uint32_t>(0x1234));
}
}
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index fd67197986..a2d37b3def 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -719,9 +719,9 @@ static HprofBasicType SignatureToBasicTypeAndSize(const char* sig, size_t* sizeO
case 'D': ret = hprof_basic_double; size = 8; break;
case 'B': ret = hprof_basic_byte; size = 1; break;
case 'S': ret = hprof_basic_short; size = 2; break;
- default: CHECK(false);
case 'I': ret = hprof_basic_int; size = 4; break;
case 'J': ret = hprof_basic_long; size = 8; break;
+ default: LOG(FATAL) << "UNREACHABLE"; UNREACHABLE();
}
if (sizeOut != NULL) {
@@ -742,9 +742,9 @@ static HprofBasicType PrimitiveToBasicTypeAndSize(Primitive::Type prim, size_t*
case Primitive::kPrimDouble: ret = hprof_basic_double; size = 8; break;
case Primitive::kPrimByte: ret = hprof_basic_byte; size = 1; break;
case Primitive::kPrimShort: ret = hprof_basic_short; size = 2; break;
- default: CHECK(false);
case Primitive::kPrimInt: ret = hprof_basic_int; size = 4; break;
case Primitive::kPrimLong: ret = hprof_basic_long; size = 8; break;
+ default: LOG(FATAL) << "UNREACHABLE"; UNREACHABLE();
}
if (sizeOut != NULL) {
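
Editorial sketch: the hprof hunks above move the default arm after all known cases and make it explicitly fatal (LOG(FATAL) plus UNREACHABLE()), so control can never fall through into the int and long cases and the compiler can see that the result is always set. The fixed shape, in stand-alone form:

    #include <cstdio>
    #include <cstdlib>

    static int BasicSizeForSignature(char sig) {
      int size = 0;
      switch (sig) {
        case 'B': size = 1; break;
        case 'S': size = 2; break;
        case 'I': size = 4; break;
        case 'J': size = 8; break;
        default:
          std::fprintf(stderr, "UNREACHABLE: unexpected signature '%c'\n", sig);
          std::abort();
      }
      return size;
    }
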
diff --git a/runtime/image.cc b/runtime/image.cc
index 478b486d91..c065d8ee35 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -23,8 +23,8 @@
namespace art {
-const byte ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const byte ImageHeader::kImageVersion[] = { '0', '0', '9', '\0' };
+const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '0', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/image.h b/runtime/image.h
index 424a40b7ca..ec95d01f57 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -44,8 +44,8 @@ class PACKED(4) ImageHeader {
bool IsValid() const;
const char* GetMagic() const;
- byte* GetImageBegin() const {
- return reinterpret_cast<byte*>(image_begin_);
+ uint8_t* GetImageBegin() const {
+ return reinterpret_cast<uint8_t*>(image_begin_);
}
size_t GetImageSize() const {
@@ -68,20 +68,20 @@ class PACKED(4) ImageHeader {
oat_checksum_ = oat_checksum;
}
- byte* GetOatFileBegin() const {
- return reinterpret_cast<byte*>(oat_file_begin_);
+ uint8_t* GetOatFileBegin() const {
+ return reinterpret_cast<uint8_t*>(oat_file_begin_);
}
- byte* GetOatDataBegin() const {
- return reinterpret_cast<byte*>(oat_data_begin_);
+ uint8_t* GetOatDataBegin() const {
+ return reinterpret_cast<uint8_t*>(oat_data_begin_);
}
- byte* GetOatDataEnd() const {
- return reinterpret_cast<byte*>(oat_data_end_);
+ uint8_t* GetOatDataEnd() const {
+ return reinterpret_cast<uint8_t*>(oat_data_end_);
}
- byte* GetOatFileEnd() const {
- return reinterpret_cast<byte*>(oat_file_end_);
+ uint8_t* GetOatFileEnd() const {
+ return reinterpret_cast<uint8_t*>(oat_file_end_);
}
off_t GetPatchDelta() const {
@@ -121,11 +121,11 @@ class PACKED(4) ImageHeader {
void RelocateImage(off_t delta);
private:
- static const byte kImageMagic[4];
- static const byte kImageVersion[4];
+ static const uint8_t kImageMagic[4];
+ static const uint8_t kImageVersion[4];
- byte magic_[4];
- byte version_[4];
+ uint8_t magic_[4];
+ uint8_t version_[4];
// Required base address for mapping the image.
uint32_t image_begin_;
diff --git a/runtime/instruction_set_test.cc b/runtime/instruction_set_test.cc
index ac17c4f0d4..80191b1387 100644
--- a/runtime/instruction_set_test.cc
+++ b/runtime/instruction_set_test.cc
@@ -47,7 +47,7 @@ TEST_F(InstructionSetTest, TestRoundTrip) {
}
TEST_F(InstructionSetTest, PointerSize) {
- EXPECT_EQ(kPointerSize, GetInstructionSetPointerSize(kRuntimeISA));
+ EXPECT_EQ(sizeof(void*), GetInstructionSetPointerSize(kRuntimeISA));
}
} // namespace art
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 7c8c63ce46..d1229b28a8 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -181,8 +181,14 @@ JdwpError JdwpState::RegisterEvent(JdwpEvent* pEvent) {
for (int i = 0; i < pEvent->modCount; i++) {
const JdwpEventMod* pMod = &pEvent->mods[i];
if (pMod->modKind == MK_LOCATION_ONLY) {
- /* should only be for Breakpoint, Step, and Exception */
- Dbg::WatchLocation(&pMod->locationOnly.loc, &req);
+ // Should only concern breakpoint, field access, field modification, step, and exception
+ // events.
+ // However breakpoint requires specific handling. Field access, field modification and step
+ // events need full deoptimization to be reported while exception event is reported during
+ // exception handling.
+ if (pEvent->eventKind == EK_BREAKPOINT) {
+ Dbg::WatchLocation(&pMod->locationOnly.loc, &req);
+ }
} else if (pMod->modKind == MK_STEP) {
/* should only be for EK_SINGLE_STEP; should only be one */
JdwpStepSize size = static_cast<JdwpStepSize>(pMod->step.size);
@@ -258,8 +264,10 @@ void JdwpState::UnregisterEvent(JdwpEvent* pEvent) {
for (int i = 0; i < pEvent->modCount; i++) {
JdwpEventMod* pMod = &pEvent->mods[i];
if (pMod->modKind == MK_LOCATION_ONLY) {
- /* should only be for Breakpoint, Step, and Exception */
- Dbg::UnwatchLocation(&pMod->locationOnly.loc, &req);
+ // Like in RegisterEvent, we need specific handling for breakpoint only.
+ if (pEvent->eventKind == EK_BREAKPOINT) {
+ Dbg::UnwatchLocation(&pMod->locationOnly.loc, &req);
+ }
}
if (pMod->modKind == MK_STEP) {
/* should only be for EK_SINGLE_STEP; should only be one */
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index d755cb98a9..231e9e56b0 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -189,7 +189,7 @@ static bool CheckNonOverlapping(uintptr_t begin,
// non-null, we check that pointer is the actual_ptr == expected_ptr,
// and if not, report in error_msg what the conflict mapping was if
// found, or a generic error in other cases.
-static bool CheckMapRequest(byte* expected_ptr, void* actual_ptr, size_t byte_count,
+static bool CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte_count,
std::string* error_msg) {
// Handled first by caller for more specific error messages.
CHECK(actual_ptr != MAP_FAILED);
@@ -234,7 +234,7 @@ static bool CheckMapRequest(byte* expected_ptr, void* actual_ptr, size_t byte_co
return false;
}
-MemMap* MemMap::MapAnonymous(const char* name, byte* expected_ptr, size_t byte_count, int prot,
+MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byte_count, int prot,
bool low_4gb, std::string* error_msg) {
if (byte_count == 0) {
return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
@@ -377,11 +377,11 @@ MemMap* MemMap::MapAnonymous(const char* name, byte* expected_ptr, size_t byte_c
if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
return nullptr;
}
- return new MemMap(name, reinterpret_cast<byte*>(actual), byte_count, actual,
+ return new MemMap(name, reinterpret_cast<uint8_t*>(actual), byte_count, actual,
page_aligned_byte_count, prot, false);
}
-MemMap* MemMap::MapFileAtAddress(byte* expected_ptr, size_t byte_count, int prot, int flags, int fd,
+MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr, size_t byte_count, int prot, int flags, int fd,
off_t start, bool reuse, const char* filename,
std::string* error_msg) {
CHECK_NE(0, prot);
@@ -414,9 +414,9 @@ MemMap* MemMap::MapFileAtAddress(byte* expected_ptr, size_t byte_count, int prot
size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
// The 'expected_ptr' is modified (if specified, ie non-null) to be page aligned to the file but
// not necessarily to virtual memory. mmap will page align 'expected' for us.
- byte* page_aligned_expected = (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
+ uint8_t* page_aligned_expected = (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
- byte* actual = reinterpret_cast<byte*>(mmap(page_aligned_expected,
+ uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(page_aligned_expected,
page_aligned_byte_count,
prot,
flags,
@@ -468,7 +468,7 @@ MemMap::~MemMap() {
CHECK(found) << "MemMap not found";
}
-MemMap::MemMap(const std::string& name, byte* begin, size_t size, void* base_begin,
+MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
size_t base_size, int prot, bool reuse)
: name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
prot_(prot), reuse_(reuse) {
@@ -487,27 +487,27 @@ MemMap::MemMap(const std::string& name, byte* begin, size_t size, void* base_beg
}
}
-MemMap* MemMap::RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
+MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
std::string* error_msg) {
DCHECK_GE(new_end, Begin());
DCHECK_LE(new_end, End());
- DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
+ DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
DCHECK(IsAligned<kPageSize>(begin_));
DCHECK(IsAligned<kPageSize>(base_begin_));
- DCHECK(IsAligned<kPageSize>(reinterpret_cast<byte*>(base_begin_) + base_size_));
+ DCHECK(IsAligned<kPageSize>(reinterpret_cast<uint8_t*>(base_begin_) + base_size_));
DCHECK(IsAligned<kPageSize>(new_end));
- byte* old_end = begin_ + size_;
- byte* old_base_end = reinterpret_cast<byte*>(base_begin_) + base_size_;
- byte* new_base_end = new_end;
+ uint8_t* old_end = begin_ + size_;
+ uint8_t* old_base_end = reinterpret_cast<uint8_t*>(base_begin_) + base_size_;
+ uint8_t* new_base_end = new_end;
DCHECK_LE(new_base_end, old_base_end);
if (new_base_end == old_base_end) {
return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot, false);
}
- size_ = new_end - reinterpret_cast<byte*>(begin_);
- base_size_ = new_base_end - reinterpret_cast<byte*>(base_begin_);
- DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
+ size_ = new_end - reinterpret_cast<uint8_t*>(begin_);
+ base_size_ = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
+ DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
size_t tail_size = old_end - new_end;
- byte* tail_base_begin = new_base_end;
+ uint8_t* tail_base_begin = new_base_end;
size_t tail_base_size = old_base_end - new_base_end;
DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
DCHECK(IsAligned<kPageSize>(tail_base_size));
@@ -543,7 +543,7 @@ MemMap* MemMap::RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
// calls. Otherwise, libc (or something else) might take this memory
// region. Note this isn't perfect as there's no way to prevent
// other threads from trying to take this memory region here.
- byte* actual = reinterpret_cast<byte*>(mmap(tail_base_begin, tail_base_size, tail_prot,
+ uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(tail_base_begin, tail_base_size, tail_prot,
flags, fd.get(), 0));
if (actual == MAP_FAILED) {
std::string maps;
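
A minimal usage sketch of the mapping API touched above, following the MapAnonymous and RemapAtEnd signatures in this patch (error handling trimmed; illustrative only, not part of the change):

  std::string error_msg;
  // Reserve two anonymous, writable pages.
  std::unique_ptr<MemMap> head(MemMap::MapAnonymous("example-head", nullptr,
                                                    2 * kPageSize, PROT_READ | PROT_WRITE,
                                                    /* low_4gb */ false, &error_msg));
  CHECK(head.get() != nullptr) << error_msg;
  // Split the second page off into its own map; 'head' shrinks to one page.
  uint8_t* new_end = head->Begin() + kPageSize;
  std::unique_ptr<MemMap> tail(head->RemapAtEnd(new_end, "example-tail",
                                                PROT_READ | PROT_WRITE, &error_msg));
  CHECK(tail.get() != nullptr) << error_msg;
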
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index e49ed48d69..314bf8d800 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -60,7 +60,7 @@ class MemMap {
// a name.
//
// On success, returns a MemMap instance. On failure, returns null.
- static MemMap* MapAnonymous(const char* ashmem_name, byte* addr, size_t byte_count, int prot,
+ static MemMap* MapAnonymous(const char* ashmem_name, uint8_t* addr, size_t byte_count, int prot,
bool low_4gb, std::string* error_msg);
// Map part of a file, taking care of non-page aligned offsets. The
@@ -80,7 +80,7 @@ class MemMap {
//
// On success, returns a MemMap instance. On failure, returns a
// nullptr;
- static MemMap* MapFileAtAddress(byte* addr, size_t byte_count, int prot, int flags, int fd,
+ static MemMap* MapFileAtAddress(uint8_t* addr, size_t byte_count, int prot, int flags, int fd,
off_t start, bool reuse, const char* filename,
std::string* error_msg);
@@ -99,7 +99,7 @@ class MemMap {
return prot_;
}
- byte* Begin() const {
+ uint8_t* Begin() const {
return begin_;
}
@@ -107,7 +107,7 @@ class MemMap {
return size_;
}
- byte* End() const {
+ uint8_t* End() const {
return Begin() + Size();
}
@@ -120,7 +120,7 @@ class MemMap {
}
void* BaseEnd() const {
- return reinterpret_cast<byte*>(BaseBegin()) + BaseSize();
+ return reinterpret_cast<uint8_t*>(BaseBegin()) + BaseSize();
}
bool HasAddress(const void* addr) const {
@@ -128,7 +128,7 @@ class MemMap {
}
// Unmap the pages at end and remap them to create another memory map.
- MemMap* RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
+ MemMap* RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
std::string* error_msg);
static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
@@ -139,7 +139,7 @@ class MemMap {
typedef AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps> Maps;
private:
- MemMap(const std::string& name, byte* begin, size_t size, void* base_begin, size_t base_size,
+ MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin, size_t base_size,
int prot, bool reuse) LOCKS_EXCLUDED(Locks::mem_maps_lock_);
static void DumpMapsLocked(std::ostream& os)
@@ -150,7 +150,7 @@ class MemMap {
EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_);
const std::string name_;
- byte* const begin_; // Start of data.
+ uint8_t* const begin_; // Start of data.
size_t size_; // Length of data.
void* const base_begin_; // Page-aligned base address.
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index e54d0e013d..a78f4631f7 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -26,8 +26,8 @@ namespace art {
class MemMapTest : public testing::Test {
public:
- static byte* BaseBegin(MemMap* mem_map) {
- return reinterpret_cast<byte*>(mem_map->base_begin_);
+ static uint8_t* BaseBegin(MemMap* mem_map) {
+ return reinterpret_cast<uint8_t*>(mem_map->base_begin_);
}
static size_t BaseSize(MemMap* mem_map) {
return mem_map->base_size_;
@@ -45,7 +45,7 @@ class MemMapTest : public testing::Test {
low_4gb,
&error_msg);
// Check its state and write to it.
- byte* base0 = m0->Begin();
+ uint8_t* base0 = m0->Begin();
ASSERT_TRUE(base0 != nullptr) << error_msg;
size_t size0 = m0->Size();
EXPECT_EQ(m0->Size(), 2 * page_size);
@@ -62,7 +62,7 @@ class MemMapTest : public testing::Test {
EXPECT_EQ(m0->Size(), page_size);
EXPECT_EQ(BaseBegin(m0), base0);
EXPECT_EQ(BaseSize(m0), page_size);
- byte* base1 = m1->Begin();
+ uint8_t* base1 = m1->Begin();
size_t size1 = m1->Size();
EXPECT_EQ(base1, base0 + page_size);
EXPECT_EQ(size1, page_size);
@@ -160,7 +160,7 @@ TEST_F(MemMapTest, MapAnonymousExactAddr) {
std::string error_msg;
// Map at an address that should work, which should succeed.
std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
- reinterpret_cast<byte*>(ART_BASE_ADDRESS),
+ reinterpret_cast<uint8_t*>(ART_BASE_ADDRESS),
kPageSize,
PROT_READ | PROT_WRITE,
false,
@@ -180,7 +180,7 @@ TEST_F(MemMapTest, MapAnonymousExactAddr) {
ASSERT_TRUE(map1->BaseBegin() != nullptr);
// Attempt to map at the same address, which should fail.
std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
- reinterpret_cast<byte*>(map1->BaseBegin()),
+ reinterpret_cast<uint8_t*>(map1->BaseBegin()),
kPageSize,
PROT_READ | PROT_WRITE,
false,
@@ -205,7 +205,7 @@ TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
uintptr_t start_addr = ART_BASE_ADDRESS + 0x1000000;
std::string error_msg;
std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
- reinterpret_cast<byte*>(start_addr),
+ reinterpret_cast<uint8_t*>(start_addr),
0x21000000,
PROT_READ | PROT_WRITE,
true,
@@ -221,7 +221,7 @@ TEST_F(MemMapTest, MapAnonymousOverflow) {
uintptr_t ptr = 0;
ptr -= kPageSize; // Now it's close to the top.
std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousOverflow",
- reinterpret_cast<byte*>(ptr),
+ reinterpret_cast<uint8_t*>(ptr),
2 * kPageSize, // brings it over the top.
PROT_READ | PROT_WRITE,
false,
@@ -234,7 +234,7 @@ TEST_F(MemMapTest, MapAnonymousOverflow) {
TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
std::string error_msg;
std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
- reinterpret_cast<byte*>(UINT64_C(0x100000000)),
+ reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
kPageSize,
PROT_READ | PROT_WRITE,
true,
@@ -246,7 +246,7 @@ TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
std::string error_msg;
std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
- reinterpret_cast<byte*>(0xF0000000),
+ reinterpret_cast<uint8_t*>(0xF0000000),
0x20000000,
PROT_READ | PROT_WRITE,
true,
@@ -269,7 +269,7 @@ TEST_F(MemMapTest, CheckNoGaps) {
ASSERT_TRUE(map.get() != nullptr) << error_msg;
ASSERT_TRUE(error_msg.empty());
// Record the base address.
- byte* map_base = reinterpret_cast<byte*>(map->BaseBegin());
+ uint8_t* map_base = reinterpret_cast<uint8_t*>(map->BaseBegin());
// Unmap it.
map.reset();
diff --git a/runtime/memory_region.h b/runtime/memory_region.h
index bab2e862b9..645996366b 100644
--- a/runtime/memory_region.h
+++ b/runtime/memory_region.h
@@ -31,7 +31,7 @@ namespace art {
class MemoryRegion {
public:
MemoryRegion() : pointer_(NULL), size_(0) {}
- MemoryRegion(void* pointer, uword size) : pointer_(pointer), size_(size) {}
+ MemoryRegion(void* pointer, uintptr_t size) : pointer_(pointer), size_(size) {}
void* pointer() const { return pointer_; }
size_t size() const { return size_; }
@@ -41,8 +41,8 @@ class MemoryRegion {
return OFFSETOF_MEMBER(MemoryRegion, pointer_);
}
- byte* start() const { return reinterpret_cast<byte*>(pointer_); }
- byte* end() const { return start() + size_; }
+ uint8_t* start() const { return reinterpret_cast<uint8_t*>(pointer_); }
+ uint8_t* end() const { return start() + size_; }
template<typename T> T Load(uintptr_t offset) const {
return *ComputeInternalPointer<T>(offset);
@@ -98,11 +98,11 @@ class MemoryRegion {
// Locate the bit with the given offset. Returns a pointer to the byte
// containing the bit, and sets bit_mask to the bit within that byte.
- byte* ComputeBitPointer(uintptr_t bit_offset, byte* bit_mask) const {
+ uint8_t* ComputeBitPointer(uintptr_t bit_offset, uint8_t* bit_mask) const {
uintptr_t bit_remainder = (bit_offset & (kBitsPerByte - 1));
*bit_mask = (1U << bit_remainder);
uintptr_t byte_offset = (bit_offset >> kBitsPerByteLog2);
- return ComputeInternalPointer<byte>(byte_offset);
+ return ComputeInternalPointer<uint8_t>(byte_offset);
}
void* pointer_;
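
The bit addressing in ComputeBitPointer above reduces to the following freestanding sketch (names are illustrative, not ART's):

  #include <cstdint>
  // byte index = bit_offset / 8, mask = 1 << (bit_offset % 8).
  inline uint8_t* LocateBit(uint8_t* base, uintptr_t bit_offset, uint8_t* bit_mask) {
    *bit_mask = static_cast<uint8_t>(1u << (bit_offset & 7u));
    return base + (bit_offset >> 3);
  }
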
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 13b5a8ba27..7e1ad7868a 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -136,10 +136,10 @@ class SetLengthToUsableSizeVisitor {
// DCHECK(array->IsArrayInstance());
int32_t length = (usable_size - header_size_) >> component_size_shift_;
DCHECK_GE(length, minimum_length_);
- byte* old_end = reinterpret_cast<byte*>(array->GetRawData(1U << component_size_shift_,
- minimum_length_));
- byte* new_end = reinterpret_cast<byte*>(array->GetRawData(1U << component_size_shift_,
- length));
+ uint8_t* old_end = reinterpret_cast<uint8_t*>(array->GetRawData(1U << component_size_shift_,
+ minimum_length_));
+ uint8_t* new_end = reinterpret_cast<uint8_t*>(array->GetRawData(1U << component_size_shift_,
+ length));
// Ensure space beyond original allocation is zeroed.
memset(old_end, 0, new_end - old_end);
array->SetLength(length);
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 8447616cf5..1a65d99023 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -393,7 +393,7 @@ inline QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() {
// Callee saves + handle scope + method ref + alignment
size_t frame_size = RoundUp(callee_info.FrameSizeInBytes() + scope_size
- - kPointerSize // callee-save frame stores a whole method pointer
+ - sizeof(void*) // callee-save frame stores a whole method pointer
+ sizeof(StackReference<mirror::ArtMethod>),
kStackAlignment);
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index de6ec05442..939d856eb7 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -385,11 +385,11 @@ class MANAGED ArtMethod FINAL : public Object {
size_t GetReturnPcOffsetInBytes(uint32_t frame_size_in_bytes)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK_EQ(frame_size_in_bytes, GetFrameSizeInBytes());
- return frame_size_in_bytes - kPointerSize;
+ return frame_size_in_bytes - sizeof(void*);
}
size_t GetHandleScopeOffsetInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return kPointerSize;
+ return sizeof(void*);
}
void RegisterNative(Thread* self, const void* native_method, bool is_fast)
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 3fcb188697..6df7204555 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -278,14 +278,15 @@ void Class::DumpClass(std::ostream& os, int flags) {
}
void Class::SetReferenceInstanceOffsets(uint32_t new_reference_offsets) {
- if (new_reference_offsets != CLASS_WALK_SUPER) {
+ if (kIsDebugBuild && (new_reference_offsets != kClassWalkSuper)) {
// Sanity check that the number of bits set in the reference offset bitmap
// agrees with the number of references
- size_t count = 0;
+ uint32_t count = 0;
for (Class* c = this; c != nullptr; c = c->GetSuperClass()) {
count += c->NumReferenceInstanceFieldsDuringLinking();
}
- CHECK_EQ((size_t)POPCOUNT(new_reference_offsets), count);
+ // +1 for the Class in Object.
+ CHECK_EQ(static_cast<uint32_t>(POPCOUNT(new_reference_offsets)) + 1, count);
}
// Not called within a transaction.
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, reference_instance_offsets_),
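
What the tightened debug check verifies, as a hedged sketch: the bits set in the new low-bit-first reference bitmap, plus one for the class pointer in Object that the bitmap does not encode, must match the reference-field count accumulated over the class hierarchy.

  // Sketch only; ART uses its own POPCOUNT helper rather than the GCC builtin.
  inline uint32_t ExpectedReferenceFieldCount(uint32_t reference_bitmap) {
    return static_cast<uint32_t>(__builtin_popcount(reference_bitmap)) + 1u;  // + Object's class reference
  }
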
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 0acf6952c2..2d4912191b 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -28,40 +28,6 @@
#include "primitive.h"
#include "read_barrier_option.h"
-/*
- * A magic value for refOffsets. Ignore the bits and walk the super
- * chain when this is the value.
- * [This is an unlikely "natural" value, since it would be 30 non-ref instance
- * fields followed by 2 ref instance fields.]
- */
-#define CLASS_WALK_SUPER 3U
-#define CLASS_BITS_PER_WORD (sizeof(uint32_t) * 8)
-#define CLASS_OFFSET_ALIGNMENT 4
-#define CLASS_HIGH_BIT (1U << (CLASS_BITS_PER_WORD - 1))
-/*
- * Given an offset, return the bit number which would encode that offset.
- * Local use only.
- */
-#define _CLASS_BIT_NUMBER_FROM_OFFSET(byteOffset) \
- ((unsigned int)(byteOffset) / \
- CLASS_OFFSET_ALIGNMENT)
-/*
- * Is the given offset too large to be encoded?
- */
-#define CLASS_CAN_ENCODE_OFFSET(byteOffset) \
- (_CLASS_BIT_NUMBER_FROM_OFFSET(byteOffset) < CLASS_BITS_PER_WORD)
-/*
- * Return a single bit, encoding the offset.
- * Undefined if the offset is too large, as defined above.
- */
-#define CLASS_BIT_FROM_OFFSET(byteOffset) \
- (CLASS_HIGH_BIT >> _CLASS_BIT_NUMBER_FROM_OFFSET(byteOffset))
-/*
- * Return an offset, given a bit number as returned from CLZ.
- */
-#define CLASS_OFFSET_FROM_CLZ(rshift) \
- MemberOffset((static_cast<int>(rshift) * CLASS_OFFSET_ALIGNMENT))
-
namespace art {
struct ClassOffsets;
@@ -81,6 +47,12 @@ class IfTable;
// C++ mirror of java.lang.Class
class MANAGED Class FINAL : public Object {
public:
+ // A magic value for reference_instance_offsets_. Ignore the bits and walk the super chain when
+ // this is the value.
+ // [This is an unlikely "natural" value, since it would be 30 non-ref instance fields followed by
+ // 2 ref instance fields.]
+ static constexpr uint32_t kClassWalkSuper = 0xC0000000;
+
// Interface method table size. Increasing this value reduces the chance of two interface methods
// colliding in the interface method table but increases the size of classes that implement
// (non-marker) interfaces.
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 166ea9c3e5..b89da9d4fe 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -141,7 +141,7 @@ inline bool Object::AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object*
#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
DCHECK(kUseBakerOrBrooksReadBarrier);
MemberOffset offset = OFFSET_OF_OBJECT_MEMBER(Object, x_rb_ptr_);
- byte* raw_addr = reinterpret_cast<byte*>(this) + offset.SizeValue();
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + offset.SizeValue();
Atomic<uint32_t>* atomic_rb_ptr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
HeapReference<Object> expected_ref(HeapReference<Object>::FromMirrorPtr(expected_rb_ptr));
HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(rb_ptr));
@@ -602,7 +602,7 @@ inline bool Object::CasFieldWeakSequentiallyConsistent32(MemberOffset field_offs
if (kVerifyFlags & kVerifyThis) {
VerifyObject(this);
}
- byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
return atomic_addr->CompareExchangeWeakSequentiallyConsistent(old_value, new_value);
@@ -620,7 +620,7 @@ inline bool Object::CasFieldWeakRelaxed32(MemberOffset field_offset,
if (kVerifyFlags & kVerifyThis) {
VerifyObject(this);
}
- byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
return atomic_addr->CompareExchangeWeakRelaxed(old_value, new_value);
@@ -638,7 +638,7 @@ inline bool Object::CasFieldStrongSequentiallyConsistent32(MemberOffset field_of
if (kVerifyFlags & kVerifyThis) {
VerifyObject(this);
}
- byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
return atomic_addr->CompareExchangeStrongSequentiallyConsistent(old_value, new_value);
@@ -682,7 +682,7 @@ inline void Object::SetField64Volatile(MemberOffset field_offset, int64_t new_va
template<typename kSize, bool kIsVolatile>
inline void Object::SetField(MemberOffset field_offset, kSize new_value) {
- byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
kSize* addr = reinterpret_cast<kSize*>(raw_addr);
if (kIsVolatile) {
reinterpret_cast<Atomic<kSize>*>(addr)->StoreSequentiallyConsistent(new_value);
@@ -693,7 +693,7 @@ inline void Object::SetField(MemberOffset field_offset, kSize new_value) {
template<typename kSize, bool kIsVolatile>
inline kSize Object::GetField(MemberOffset field_offset) {
- const byte* raw_addr = reinterpret_cast<const byte*>(this) + field_offset.Int32Value();
+ const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(this) + field_offset.Int32Value();
const kSize* addr = reinterpret_cast<const kSize*>(raw_addr);
if (kIsVolatile) {
return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadSequentiallyConsistent();
@@ -714,7 +714,7 @@ inline bool Object::CasFieldWeakSequentiallyConsistent64(MemberOffset field_offs
if (kVerifyFlags & kVerifyThis) {
VerifyObject(this);
}
- byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<int64_t>* atomic_addr = reinterpret_cast<Atomic<int64_t>*>(raw_addr);
return atomic_addr->CompareExchangeWeakSequentiallyConsistent(old_value, new_value);
}
@@ -731,7 +731,7 @@ inline bool Object::CasFieldStrongSequentiallyConsistent64(MemberOffset field_of
if (kVerifyFlags & kVerifyThis) {
VerifyObject(this);
}
- byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<int64_t>* atomic_addr = reinterpret_cast<Atomic<int64_t>*>(raw_addr);
return atomic_addr->CompareExchangeStrongSequentiallyConsistent(old_value, new_value);
}
@@ -742,7 +742,7 @@ inline T* Object::GetFieldObject(MemberOffset field_offset) {
if (kVerifyFlags & kVerifyThis) {
VerifyObject(this);
}
- byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
HeapReference<T>* objref_addr = reinterpret_cast<HeapReference<T>*>(raw_addr);
T* result = ReadBarrier::Barrier<T, kReadBarrierOption>(this, field_offset, objref_addr);
if (kIsVolatile) {
@@ -782,7 +782,7 @@ inline void Object::SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset,
if (kVerifyFlags & kVerifyWrites) {
VerifyObject(new_value);
}
- byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
HeapReference<Object>* objref_addr = reinterpret_cast<HeapReference<Object>*>(raw_addr);
if (kIsVolatile) {
// TODO: Refactor to use a SequentiallyConsistent store instead.
@@ -818,7 +818,7 @@ inline HeapReference<Object>* Object::GetFieldObjectReferenceAddr(MemberOffset f
if (kVerifyFlags & kVerifyThis) {
VerifyObject(this);
}
- return reinterpret_cast<HeapReference<Object>*>(reinterpret_cast<byte*>(this) +
+ return reinterpret_cast<HeapReference<Object>*>(reinterpret_cast<uint8_t*>(this) +
field_offset.Int32Value());
}
@@ -842,7 +842,7 @@ inline bool Object::CasFieldWeakSequentiallyConsistentObject(MemberOffset field_
}
HeapReference<Object> old_ref(HeapReference<Object>::FromMirrorPtr(old_value));
HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_value));
- byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
bool success = atomic_addr->CompareExchangeWeakSequentiallyConsistent(old_ref.reference_,
@@ -874,7 +874,7 @@ inline bool Object::CasFieldStrongSequentiallyConsistentObject(MemberOffset fiel
}
HeapReference<Object> old_ref(HeapReference<Object>::FromMirrorPtr(old_value));
HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_value));
- byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
bool success = atomic_addr->CompareExchangeStrongSequentiallyConsistent(old_ref.reference_,
@@ -888,18 +888,18 @@ inline bool Object::CasFieldStrongSequentiallyConsistentObject(MemberOffset fiel
template<bool kVisitClass, bool kIsStatic, typename Visitor>
inline void Object::VisitFieldsReferences(uint32_t ref_offsets, const Visitor& visitor) {
- if (!kIsStatic && LIKELY(ref_offsets != CLASS_WALK_SUPER)) {
- if (!kVisitClass) {
- // Mask out the class from the reference offsets.
- ref_offsets ^= kWordHighBitMask;
+ if (!kIsStatic && (ref_offsets != mirror::Class::kClassWalkSuper)) {
+ // Instance fields and not the slow-path.
+ if (kVisitClass) {
+ visitor(this, ClassOffset(), kIsStatic);
}
- DCHECK_EQ(ClassOffset().Uint32Value(), 0U);
- // Found a reference offset bitmap. Visit the specified offsets.
+ uint32_t field_offset = mirror::kObjectHeaderSize;
while (ref_offsets != 0) {
- size_t right_shift = CLZ(ref_offsets);
- MemberOffset field_offset = CLASS_OFFSET_FROM_CLZ(right_shift);
- visitor(this, field_offset, kIsStatic);
- ref_offsets &= ~(CLASS_HIGH_BIT >> right_shift);
+ if ((ref_offsets & 1) != 0) {
+ visitor(this, MemberOffset(field_offset), kIsStatic);
+ }
+ ref_offsets >>= 1;
+ field_offset += sizeof(mirror::HeapReference<mirror::Object>);
}
} else {
// There is no reference offset bitmap. In the non-static case, walk up the class
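
The rewritten loop above scans the bitmap from the least-significant bit upward: bit 0 is the first reference slot after the Object header, and each further bit advances by one HeapReference. A self-contained sketch of that traversal (the visitor and parameter names are placeholders, not ART's):

  #include <cstdint>
  #include <functional>
  inline void VisitReferenceOffsets(uint32_t ref_offsets, uint32_t header_size,
                                    uint32_t reference_size,
                                    const std::function<void(uint32_t)>& visit) {
    uint32_t field_offset = header_size;  // first slot after the object header
    while (ref_offsets != 0u) {
      if ((ref_offsets & 1u) != 0u) {
        visit(field_offset);
      }
      ref_offsets >>= 1;
      field_offset += reference_size;
    }
  }
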
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 57069ab26c..9578c97d82 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -69,8 +69,8 @@ Object* Object::CopyObject(Thread* self, mirror::Object* dest, mirror::Object* s
size_t num_bytes) {
// Copy instance data. We assume memcpy copies by words.
// TODO: expose and use move32.
- byte* src_bytes = reinterpret_cast<byte*>(src);
- byte* dst_bytes = reinterpret_cast<byte*>(dest);
+ uint8_t* src_bytes = reinterpret_cast<uint8_t*>(src);
+ uint8_t* dst_bytes = reinterpret_cast<uint8_t*>(dest);
size_t offset = sizeof(Object);
memcpy(dst_bytes + offset, src_bytes + offset, num_bytes - offset);
if (kUseBakerOrBrooksReadBarrier) {
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index b68aef9c7f..1bbcf8ef1c 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_MIRROR_OBJECT_H_
#define ART_RUNTIME_MIRROR_OBJECT_H_
+#include "globals.h"
#include "object_reference.h"
#include "offsets.h"
#include "verify_object.h"
@@ -60,6 +61,9 @@ class Throwable;
// Checks that we don't do field assignments which violate the typing system.
static constexpr bool kCheckFieldAssignments = false;
+// Size of Object.
+static constexpr uint32_t kObjectHeaderSize = kUseBakerOrBrooksReadBarrier ? 16 : 8;
+
// C++ mirror of java.lang.Object
class MANAGED LOCKABLE Object {
public:
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 7fa664d2dc..1aeba7487c 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -76,7 +76,8 @@ class ObjectTest : public CommonRuntimeTest {
// Keep constants in sync.
TEST_F(ObjectTest, Constants) {
- EXPECT_EQ(kObjectReferenceSize, sizeof(mirror::HeapReference<mirror::Object>));
+ EXPECT_EQ(kObjectReferenceSize, sizeof(HeapReference<Object>));
+ EXPECT_EQ(kObjectHeaderSize, sizeof(Object));
}
// Keep the assembly code constants in sync.
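
The new kObjectHeaderSize constant is tied to the C++ layout by the test above; an equivalent compile-time formulation (a sketch under the same definitions, assuming the mirror namespace is in scope) would be:

  static_assert(sizeof(mirror::HeapReference<mirror::Object>) == kObjectReferenceSize,
                "reference size out of sync");
  // 16 bytes with Baker/Brooks read barriers enabled, 8 bytes otherwise.
  static_assert(sizeof(mirror::Object) == kObjectHeaderSize,
                "Object header size out of sync");
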
diff --git a/runtime/monitor_pool.h b/runtime/monitor_pool.h
index cb4516252b..5b9209350b 100644
--- a/runtime/monitor_pool.h
+++ b/runtime/monitor_pool.h
@@ -172,7 +172,7 @@ class MonitorPool {
// To avoid race issues when resizing, we keep all the previous arrays.
std::vector<uintptr_t*> old_chunk_arrays_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
- typedef TrackingAllocator<byte, kAllocatorTagMonitorPool> Allocator;
+ typedef TrackingAllocator<uint8_t, kAllocatorTagMonitorPool> Allocator;
Allocator allocator_;
// Start of free list of monitors.
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 23f46f41d9..ec7d82db8e 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -464,7 +464,7 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) {
class_def_index < dex_file->NumClassDefs();
class_def_index++) {
const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
- const byte* class_data = dex_file->GetClassData(class_def);
+ const uint8_t* class_data = dex_file->GetClassData(class_def);
if (class_data == NULL) {
continue;
}
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index d166be030a..7f5a611d83 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -268,6 +268,7 @@ static void SetFieldValue(ScopedFastNativeObjectAccess& soa, mirror::Object* o,
break;
}
// Else fall through to report an error.
+ FALLTHROUGH_INTENDED;
case Primitive::kPrimVoid:
// Never okay.
ThrowIllegalArgumentException(nullptr, StringPrintf("Not a primitive field: %s",
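
FALLTHROUGH_INTENDED, added before the kPrimVoid case above, marks the fall-through as deliberate so implicit-fallthrough warnings stay meaningful. A generic, self-contained sketch of the idiom (the macro definition and names below are assumptions for illustration, not ART's):

  #include <cstdio>

  #if defined(__clang__)
  #define FALLTHROUGH_INTENDED [[clang::fallthrough]]
  #else
  #define FALLTHROUGH_INTENDED do { } while (0)
  #endif

  enum class Kind { kMaybeHandled, kNeverHandled };

  void Dispatch(Kind kind, bool handled) {
    switch (kind) {
      case Kind::kMaybeHandled:
        if (handled) {
          break;
        }
        // Else fall through to report the error, like the generic case.
        FALLTHROUGH_INTENDED;
      case Kind::kNeverHandled:
        std::fprintf(stderr, "not handled\n");
        break;
    }
  }
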
diff --git a/runtime/oat_file-inl.h b/runtime/oat_file-inl.h
index 9570bb501a..6237767d79 100644
--- a/runtime/oat_file-inl.h
+++ b/runtime/oat_file-inl.h
@@ -35,7 +35,7 @@ inline uint32_t OatFile::OatMethod::GetOatQuickMethodHeaderOffset() const {
if (method_header == nullptr) {
return 0u;
}
- return reinterpret_cast<const byte*>(method_header) - begin_;
+ return reinterpret_cast<const uint8_t*>(method_header) - begin_;
}
inline uint32_t OatFile::OatMethod::GetQuickCodeSize() const {
@@ -51,7 +51,7 @@ inline uint32_t OatFile::OatMethod::GetQuickCodeSizeOffset() const {
if (method_header == nullptr) {
return 0u;
}
- return reinterpret_cast<const byte*>(&method_header->code_size_) - begin_;
+ return reinterpret_cast<const uint8_t*>(&method_header->code_size_) - begin_;
}
inline size_t OatFile::OatMethod::GetFrameSizeInBytes() const {
@@ -88,7 +88,7 @@ inline uint32_t OatFile::OatMethod::GetMappingTableOffsetOffset() const {
if (method_header == nullptr) {
return 0u;
}
- return reinterpret_cast<const byte*>(&method_header->mapping_table_offset_) - begin_;
+ return reinterpret_cast<const uint8_t*>(&method_header->mapping_table_offset_) - begin_;
}
inline uint32_t OatFile::OatMethod::GetVmapTableOffset() const {
@@ -101,7 +101,7 @@ inline uint32_t OatFile::OatMethod::GetVmapTableOffsetOffset() const {
if (method_header == nullptr) {
return 0u;
}
- return reinterpret_cast<const byte*>(&method_header->vmap_table_offset_) - begin_;
+ return reinterpret_cast<const uint8_t*>(&method_header->vmap_table_offset_) - begin_;
}
inline const uint8_t* OatFile::OatMethod::GetMappingTable() const {
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index a8a830756f..03a398e672 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -68,7 +68,7 @@ OatFile* OatFile::OpenMemory(std::vector<uint8_t>& oat_contents,
OatFile* OatFile::Open(const std::string& filename,
const std::string& location,
- byte* requested_base,
+ uint8_t* requested_base,
bool executable,
std::string* error_msg) {
CHECK(!filename.empty()) << location;
@@ -114,7 +114,7 @@ OatFile* OatFile::OpenReadable(File* file, const std::string& location, std::str
OatFile* OatFile::OpenDlopen(const std::string& elf_filename,
const std::string& location,
- byte* requested_base,
+ uint8_t* requested_base,
std::string* error_msg) {
std::unique_ptr<OatFile> oat_file(new OatFile(location, true));
bool success = oat_file->Dlopen(elf_filename, requested_base, error_msg);
@@ -126,7 +126,7 @@ OatFile* OatFile::OpenDlopen(const std::string& elf_filename,
OatFile* OatFile::OpenElfFile(File* file,
const std::string& location,
- byte* requested_base,
+ uint8_t* requested_base,
bool writable,
bool executable,
std::string* error_msg) {
@@ -153,7 +153,7 @@ OatFile::~OatFile() {
}
}
-bool OatFile::Dlopen(const std::string& elf_filename, byte* requested_base,
+bool OatFile::Dlopen(const std::string& elf_filename, uint8_t* requested_base,
std::string* error_msg) {
char* absolute_path = realpath(elf_filename.c_str(), NULL);
if (absolute_path == NULL) {
@@ -166,7 +166,7 @@ bool OatFile::Dlopen(const std::string& elf_filename, byte* requested_base,
*error_msg = StringPrintf("Failed to dlopen '%s': %s", elf_filename.c_str(), dlerror());
return false;
}
- begin_ = reinterpret_cast<byte*>(dlsym(dlopen_handle_, "oatdata"));
+ begin_ = reinterpret_cast<uint8_t*>(dlsym(dlopen_handle_, "oatdata"));
if (begin_ == NULL) {
*error_msg = StringPrintf("Failed to find oatdata symbol in '%s': %s", elf_filename.c_str(),
dlerror());
@@ -179,7 +179,7 @@ bool OatFile::Dlopen(const std::string& elf_filename, byte* requested_base,
ReadFileToString("/proc/self/maps", error_msg);
return false;
}
- end_ = reinterpret_cast<byte*>(dlsym(dlopen_handle_, "oatlastword"));
+ end_ = reinterpret_cast<uint8_t*>(dlsym(dlopen_handle_, "oatlastword"));
if (end_ == NULL) {
*error_msg = StringPrintf("Failed to find oatlastword symbol in '%s': %s", elf_filename.c_str(),
dlerror());
@@ -190,7 +190,7 @@ bool OatFile::Dlopen(const std::string& elf_filename, byte* requested_base,
return Setup(error_msg);
}
-bool OatFile::ElfFileOpen(File* file, byte* requested_base, bool writable, bool executable,
+bool OatFile::ElfFileOpen(File* file, uint8_t* requested_base, bool writable, bool executable,
std::string* error_msg) {
elf_file_.reset(ElfFile::Open(file, writable, true, error_msg));
if (elf_file_.get() == nullptr) {
@@ -229,7 +229,7 @@ bool OatFile::Setup(std::string* error_msg) {
*error_msg = StringPrintf("Invalid oat magic for '%s'", GetLocation().c_str());
return false;
}
- const byte* oat = Begin();
+ const uint8_t* oat = Begin();
oat += sizeof(OatHeader);
if (oat > End()) {
*error_msg = StringPrintf("In oat file '%s' found truncated OatHeader", GetLocation().c_str());
@@ -350,12 +350,12 @@ const OatHeader& OatFile::GetOatHeader() const {
return *reinterpret_cast<const OatHeader*>(Begin());
}
-const byte* OatFile::Begin() const {
+const uint8_t* OatFile::Begin() const {
CHECK(begin_ != NULL);
return begin_;
}
-const byte* OatFile::End() const {
+const uint8_t* OatFile::End() const {
CHECK(end_ != NULL);
return end_;
}
@@ -436,7 +436,7 @@ OatFile::OatDexFile::OatDexFile(const OatFile* oat_file,
const std::string& dex_file_location,
const std::string& canonical_dex_file_location,
uint32_t dex_file_location_checksum,
- const byte* dex_file_pointer,
+ const uint8_t* dex_file_pointer,
const uint32_t* oat_class_offsets_pointer)
: oat_file_(oat_file),
dex_file_location_(dex_file_location),
@@ -463,26 +463,26 @@ uint32_t OatFile::OatDexFile::GetOatClassOffset(uint16_t class_def_index) const
OatFile::OatClass OatFile::OatDexFile::GetOatClass(uint16_t class_def_index) const {
uint32_t oat_class_offset = GetOatClassOffset(class_def_index);
- const byte* oat_class_pointer = oat_file_->Begin() + oat_class_offset;
+ const uint8_t* oat_class_pointer = oat_file_->Begin() + oat_class_offset;
CHECK_LT(oat_class_pointer, oat_file_->End()) << oat_file_->GetLocation();
- const byte* status_pointer = oat_class_pointer;
+ const uint8_t* status_pointer = oat_class_pointer;
CHECK_LT(status_pointer, oat_file_->End()) << oat_file_->GetLocation();
mirror::Class::Status status =
static_cast<mirror::Class::Status>(*reinterpret_cast<const int16_t*>(status_pointer));
CHECK_LT(status, mirror::Class::kStatusMax);
- const byte* type_pointer = status_pointer + sizeof(uint16_t);
+ const uint8_t* type_pointer = status_pointer + sizeof(uint16_t);
CHECK_LT(type_pointer, oat_file_->End()) << oat_file_->GetLocation();
OatClassType type = static_cast<OatClassType>(*reinterpret_cast<const uint16_t*>(type_pointer));
CHECK_LT(type, kOatClassMax);
- const byte* after_type_pointer = type_pointer + sizeof(int16_t);
+ const uint8_t* after_type_pointer = type_pointer + sizeof(int16_t);
CHECK_LE(after_type_pointer, oat_file_->End()) << oat_file_->GetLocation();
uint32_t bitmap_size = 0;
- const byte* bitmap_pointer = nullptr;
- const byte* methods_pointer = nullptr;
+ const uint8_t* bitmap_pointer = nullptr;
+ const uint8_t* methods_pointer = nullptr;
if (type != kOatClassNoneCompiled) {
if (type == kOatClassSomeCompiled) {
bitmap_size = static_cast<uint32_t>(*reinterpret_cast<const uint32_t*>(after_type_pointer));
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index b9d5702b0f..734b9b3587 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -48,7 +48,7 @@ class OatFile {
// optionally be used to request where the file should be loaded.
static OatFile* Open(const std::string& filename,
const std::string& location,
- byte* requested_base,
+ uint8_t* requested_base,
bool executable,
std::string* error_msg);
@@ -148,7 +148,7 @@ class OatFile {
uint32_t GetVmapTableOffsetOffset() const;
// Create an OatMethod with offsets relative to the given base address
- OatMethod(const byte* base, const uint32_t code_offset, const uint32_t gc_map_offset)
+ OatMethod(const uint8_t* base, const uint32_t code_offset, const uint32_t gc_map_offset)
: begin_(base),
code_offset_(code_offset),
native_gc_map_offset_(gc_map_offset) {
@@ -170,7 +170,7 @@ class OatFile {
return reinterpret_cast<T>(begin_ + offset);
}
- const byte* const begin_;
+ const uint8_t* const begin_;
const uint32_t code_offset_;
const uint32_t native_gc_map_offset_;
@@ -272,14 +272,14 @@ class OatFile {
const std::string& dex_file_location,
const std::string& canonical_dex_file_location,
uint32_t dex_file_checksum,
- const byte* dex_file_pointer,
+ const uint8_t* dex_file_pointer,
const uint32_t* oat_class_offsets_pointer);
const OatFile* const oat_file_;
const std::string dex_file_location_;
const std::string canonical_dex_file_location_;
const uint32_t dex_file_location_checksum_;
- const byte* const dex_file_pointer_;
+ const uint8_t* const dex_file_pointer_;
const uint32_t* const oat_class_offsets_pointer_;
friend class OatFile;
@@ -299,27 +299,27 @@ class OatFile {
return End() - Begin();
}
- const byte* Begin() const;
- const byte* End() const;
+ const uint8_t* Begin() const;
+ const uint8_t* End() const;
private:
static void CheckLocation(const std::string& location);
static OatFile* OpenDlopen(const std::string& elf_filename,
const std::string& location,
- byte* requested_base,
+ uint8_t* requested_base,
std::string* error_msg);
static OatFile* OpenElfFile(File* file,
const std::string& location,
- byte* requested_base,
+ uint8_t* requested_base,
bool writable,
bool executable,
std::string* error_msg);
explicit OatFile(const std::string& filename, bool executable);
- bool Dlopen(const std::string& elf_filename, byte* requested_base, std::string* error_msg);
- bool ElfFileOpen(File* file, byte* requested_base, bool writable, bool executable,
+ bool Dlopen(const std::string& elf_filename, uint8_t* requested_base, std::string* error_msg);
+ bool ElfFileOpen(File* file, uint8_t* requested_base, bool writable, bool executable,
std::string* error_msg);
bool Setup(std::string* error_msg);
@@ -329,10 +329,10 @@ class OatFile {
const std::string location_;
// Pointer to OatHeader.
- const byte* begin_;
+ const uint8_t* begin_;
// Pointer to end of oat region for bounds checking.
- const byte* end_;
+ const uint8_t* end_;
// Was this oat_file loaded executable?
const bool is_executable_;
diff --git a/runtime/stack.cc b/runtime/stack.cc
index b8b10d26e0..008941fcee 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -392,16 +392,16 @@ bool StackVisitor::SetFPR(uint32_t reg, uintptr_t value) {
}
uintptr_t StackVisitor::GetReturnPc() const {
- byte* sp = reinterpret_cast<byte*>(GetCurrentQuickFrame());
+ uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
DCHECK(sp != NULL);
- byte* pc_addr = sp + GetMethod()->GetReturnPcOffsetInBytes();
+ uint8_t* pc_addr = sp + GetMethod()->GetReturnPcOffsetInBytes();
return *reinterpret_cast<uintptr_t*>(pc_addr);
}
void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
- byte* sp = reinterpret_cast<byte*>(GetCurrentQuickFrame());
+ uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
CHECK(sp != NULL);
- byte* pc_addr = sp + GetMethod()->GetReturnPcOffsetInBytes();
+ uint8_t* pc_addr = sp + GetMethod()->GetReturnPcOffsetInBytes();
*reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
}
@@ -544,7 +544,7 @@ void StackVisitor::WalkStack(bool include_transitions) {
size_t frame_size = method->GetFrameSizeInBytes();
// Compute PC for next stack frame from return PC.
size_t return_pc_offset = method->GetReturnPcOffsetInBytes(frame_size);
- byte* return_pc_addr = reinterpret_cast<byte*>(cur_quick_frame_) + return_pc_offset;
+ uint8_t* return_pc_addr = reinterpret_cast<uint8_t*>(cur_quick_frame_) + return_pc_offset;
uintptr_t return_pc = *reinterpret_cast<uintptr_t*>(return_pc_addr);
if (UNLIKELY(exit_stubs_installed)) {
// While profiling, the return pc is restored from the side stack, except when walking
@@ -574,7 +574,7 @@ void StackVisitor::WalkStack(bool include_transitions) {
}
}
cur_quick_frame_pc_ = return_pc;
- byte* next_frame = reinterpret_cast<byte*>(cur_quick_frame_) + frame_size;
+ uint8_t* next_frame = reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size;
cur_quick_frame_ = reinterpret_cast<StackReference<mirror::ArtMethod>*>(next_frame);
cur_depth_++;
method = cur_quick_frame_->AsMirrorPtr();
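
Taken together with the ArtMethod change earlier in this patch (GetReturnPcOffsetInBytes is now the frame size minus sizeof(void*)), the return PC read and written above lives in the topmost pointer-sized slot of the quick frame. A standalone sketch of the address computation (placeholder names):

  #include <cstddef>
  #include <cstdint>
  inline uintptr_t* ReturnPcAddress(uint8_t* frame_base, size_t frame_size_in_bytes) {
    // The saved return PC occupies the last pointer-sized slot of the frame.
    return reinterpret_cast<uintptr_t*>(frame_base + frame_size_in_bytes - sizeof(void*));
  }
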
diff --git a/runtime/stack.h b/runtime/stack.h
index 44e36c478e..25e50a1a1f 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -484,10 +484,10 @@ class StackVisitor {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Callee saves are held at the top of the frame
DCHECK(GetMethod() != nullptr);
- byte* save_addr =
- reinterpret_cast<byte*>(cur_quick_frame_) + frame_size - ((num + 1) * kPointerSize);
+ uint8_t* save_addr =
+ reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size - ((num + 1) * sizeof(void*));
#if defined(__i386__) || defined(__x86_64__)
- save_addr -= kPointerSize; // account for return address
+ save_addr -= sizeof(void*); // account for return address
#endif
return reinterpret_cast<uintptr_t*>(save_addr);
}
@@ -557,7 +557,7 @@ class StackVisitor {
uint16_t vreg) const {
int offset = GetVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg, kRuntimeISA);
DCHECK_EQ(cur_quick_frame, GetCurrentQuickFrame());
- byte* vreg_addr = reinterpret_cast<byte*>(cur_quick_frame) + offset;
+ uint8_t* vreg_addr = reinterpret_cast<uint8_t*>(cur_quick_frame) + offset;
return reinterpret_cast<uint32_t*>(vreg_addr);
}
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 170cec68d1..e1b5b91c31 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -198,9 +198,9 @@ inline bool Thread::PushOnThreadLocalAllocationStack(mirror::Object* obj) {
DCHECK_LE(tlsPtr_.thread_local_alloc_stack_top, tlsPtr_.thread_local_alloc_stack_end);
if (tlsPtr_.thread_local_alloc_stack_top < tlsPtr_.thread_local_alloc_stack_end) {
// There's room.
- DCHECK_LE(reinterpret_cast<byte*>(tlsPtr_.thread_local_alloc_stack_top) +
+ DCHECK_LE(reinterpret_cast<uint8_t*>(tlsPtr_.thread_local_alloc_stack_top) +
sizeof(mirror::Object*),
- reinterpret_cast<byte*>(tlsPtr_.thread_local_alloc_stack_end));
+ reinterpret_cast<uint8_t*>(tlsPtr_.thread_local_alloc_stack_end));
DCHECK(*tlsPtr_.thread_local_alloc_stack_top == nullptr);
*tlsPtr_.thread_local_alloc_stack_top = obj;
++tlsPtr_.thread_local_alloc_stack_top;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 07657d1422..b0c8fe1b3a 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -235,7 +235,7 @@ static size_t FixStackSize(size_t stack_size) {
}
// Global variable to prevent the compiler optimizing away the page reads for the stack.
-byte dont_optimize_this;
+uint8_t dont_optimize_this;
// Install a protected region in the stack. This is used to trigger a SIGSEGV if a stack
// overflow is detected. It is located right below the stack_begin_.
@@ -249,9 +249,9 @@ byte dont_optimize_this;
// this by reading every page from the stack bottom (highest address) to the stack top.
// We then madvise this away.
void Thread::InstallImplicitProtection() {
- byte* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
- byte* stack_himem = tlsPtr_.stack_end;
- byte* stack_top = reinterpret_cast<byte*>(reinterpret_cast<uintptr_t>(&stack_himem) &
+ uint8_t* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
+ uint8_t* stack_himem = tlsPtr_.stack_end;
+ uint8_t* stack_top = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(&stack_himem) &
~(kPageSize - 1)); // Page containing current top of stack.
// First remove the protection on the protected region as we will want to read and
@@ -265,7 +265,7 @@ void Thread::InstallImplicitProtection() {
// a segv.
// Read every page from the high address to the low.
- for (byte* p = stack_top; p >= pregion; p -= kPageSize) {
+ for (uint8_t* p = stack_top; p >= pregion; p -= kPageSize) {
dont_optimize_this = *p;
}
@@ -496,7 +496,7 @@ void Thread::InitStackHwm() {
PrettySize(read_stack_size).c_str(),
PrettySize(read_guard_size).c_str());
- tlsPtr_.stack_begin = reinterpret_cast<byte*>(read_stack_base);
+ tlsPtr_.stack_begin = reinterpret_cast<uint8_t*>(read_stack_base);
tlsPtr_.stack_size = read_stack_size;
// The minimum stack size we can cope with is the overflow reserved bytes (typically
@@ -2264,7 +2264,7 @@ void Thread::SetStackEndForStackOverflow() {
}
}
-void Thread::SetTlab(byte* start, byte* end) {
+void Thread::SetTlab(uint8_t* start, uint8_t* end) {
DCHECK_LE(start, end);
tlsPtr_.thread_local_start = start;
tlsPtr_.thread_local_pos = tlsPtr_.thread_local_start;
diff --git a/runtime/thread.h b/runtime/thread.h
index 6c427b8d50..998e47275c 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -584,7 +584,7 @@ class Thread {
return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
}
- byte* GetStackEndForInterpreter(bool implicit_overflow_check) const {
+ uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const {
if (implicit_overflow_check) {
// The interpreter needs the extra overflow bytes that stack_end does
// not include.
@@ -594,7 +594,7 @@ class Thread {
}
}
- byte* GetStackEnd() const {
+ uint8_t* GetStackEnd() const {
return tlsPtr_.stack_end;
}
@@ -790,7 +790,7 @@ class Thread {
size_t TlabSize() const;
// Doesn't check that there is room.
mirror::Object* AllocTlab(size_t bytes);
- void SetTlab(byte* start, byte* end);
+ void SetTlab(uint8_t* start, uint8_t* end);
bool HasTlab() const;
// Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
@@ -1043,14 +1043,14 @@ class Thread {
}
// The biased card table, see CardTable for details.
- byte* card_table;
+ uint8_t* card_table;
// The pending exception or NULL.
mirror::Throwable* exception;
// The end of this thread's stack. This is the lowest safely-addressable address on the stack.
// We leave extra space so there's room for the code that throws StackOverflowError.
- byte* stack_end;
+ uint8_t* stack_end;
// The top of the managed stack often manipulated directly by compiler generated code.
ManagedStack managed_stack;
@@ -1073,7 +1073,7 @@ class Thread {
jobject jpeer;
// The "lowest addressable byte" of the stack.
- byte* stack_begin;
+ uint8_t* stack_begin;
// Size of the stack.
size_t stack_size;
@@ -1137,9 +1137,9 @@ class Thread {
QuickEntryPoints quick_entrypoints;
// Thread-local allocation pointer.
- byte* thread_local_start;
- byte* thread_local_pos;
- byte* thread_local_end;
+ uint8_t* thread_local_start;
+ uint8_t* thread_local_pos;
+ uint8_t* thread_local_end;
size_t thread_local_objects;
// There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 53c2859fb0..35411e2660 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -51,7 +51,13 @@ ThreadList::~ThreadList() {
// Detach the current thread if necessary. If we failed to start, there might not be any threads.
// We need to detach the current thread here in case there's another thread waiting to join with
// us.
- if (Contains(Thread::Current())) {
+ bool contains = false;
+ {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, *Locks::thread_list_lock_);
+ contains = Contains(self);
+ }
+ if (contains) {
Runtime::Current()->DetachCurrentThread();
}
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 0496d97ae7..0688c1a784 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -92,7 +92,7 @@ void GetThreadStack(pthread_t thread, void** stack_base, size_t* stack_size, siz
// (On Mac OS 10.7, it's the end.)
int stack_variable;
if (stack_addr > &stack_variable) {
- *stack_base = reinterpret_cast<byte*>(stack_addr) - *stack_size;
+ *stack_base = reinterpret_cast<uint8_t*>(stack_addr) - *stack_size;
} else {
*stack_base = stack_addr;
}
@@ -1369,11 +1369,11 @@ bool IsZipMagic(uint32_t magic) {
}
bool IsDexMagic(uint32_t magic) {
- return DexFile::IsMagicValid(reinterpret_cast<const byte*>(&magic));
+ return DexFile::IsMagicValid(reinterpret_cast<const uint8_t*>(&magic));
}
bool IsOatMagic(uint32_t magic) {
- return (memcmp(reinterpret_cast<const byte*>(magic),
+ return (memcmp(reinterpret_cast<const uint8_t*>(magic),
OatHeader::kOatMagic,
sizeof(OatHeader::kOatMagic)) == 0);
}
diff --git a/runtime/utils.h b/runtime/utils.h
index 4fcd380a8d..53b49c8fd4 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -108,23 +108,23 @@ static inline bool IsAlignedParam(T x, int n) {
DCHECK(::art::IsAlignedParam(value, alignment)) << reinterpret_cast<const void*>(value)
// Check whether an N-bit two's-complement representation can hold value.
-static inline bool IsInt(int N, word value) {
+static inline bool IsInt(int N, intptr_t value) {
CHECK_LT(0, N);
- CHECK_LT(N, kBitsPerWord);
- word limit = static_cast<word>(1) << (N - 1);
+ CHECK_LT(N, kBitsPerIntPtrT);
+ intptr_t limit = static_cast<intptr_t>(1) << (N - 1);
return (-limit <= value) && (value < limit);
}
-static inline bool IsUint(int N, word value) {
+static inline bool IsUint(int N, intptr_t value) {
CHECK_LT(0, N);
- CHECK_LT(N, kBitsPerWord);
- word limit = static_cast<word>(1) << N;
+ CHECK_LT(N, kBitsPerIntPtrT);
+ intptr_t limit = static_cast<intptr_t>(1) << N;
return (0 <= value) && (value < limit);
}
-static inline bool IsAbsoluteUint(int N, word value) {
+static inline bool IsAbsoluteUint(int N, intptr_t value) {
CHECK_LT(0, N);
- CHECK_LT(N, kBitsPerWord);
+ CHECK_LT(N, kBitsPerIntPtrT);
if (value < 0) value = -value;
return IsUint(N, value);
}
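
The helpers above now take intptr_t and bound N by kBitsPerIntPtrT; IsInt(8, v), for example, accepts exactly the values an int8_t can hold. A standalone version of the same check:

  #include <cstdint>
  // True if 'value' fits in an N-bit two's-complement integer, 0 < N < pointer width.
  inline bool FitsInSignedBits(int N, intptr_t value) {
    intptr_t limit = static_cast<intptr_t>(1) << (N - 1);
    return (-limit <= value) && (value < limit);
  }
  // FitsInSignedBits(8, 127) == true; FitsInSignedBits(8, 128) == false.
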
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 9747b4e46c..fb07ba0c2d 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -133,7 +133,7 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self,
bool allow_soft_failures,
std::string* error) {
DCHECK(class_def != nullptr);
- const byte* class_data = dex_file->GetClassData(*class_def);
+ const uint8_t* class_data = dex_file->GetClassData(*class_def);
if (class_data == nullptr) {
// empty class, probably a marker interface
return kNoFailure;
@@ -659,7 +659,7 @@ bool MethodVerifier::ScanTryCatchBlocks() {
}
}
// Iterate over each of the handlers to verify target addresses.
- const byte* handlers_ptr = DexFile::GetCatchHandlerData(*code_item_, 0);
+ const uint8_t* handlers_ptr = DexFile::GetCatchHandlerData(*code_item_, 0);
uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
ClassLinker* linker = Runtime::Current()->GetClassLinker();
for (uint32_t idx = 0; idx < handlers_size; idx++) {
@@ -3012,7 +3012,7 @@ const RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
const RegType& MethodVerifier::GetCaughtExceptionType() {
const RegType* common_super = nullptr;
if (code_item_->tries_size_ != 0) {
- const byte* handlers_ptr = DexFile::GetCatchHandlerData(*code_item_, 0);
+ const uint8_t* handlers_ptr = DexFile::GetCatchHandlerData(*code_item_, 0);
uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
for (uint32_t i = 0; i < handlers_size; i++) {
CatchHandlerIterator iterator(handlers_ptr);
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index ae5b08f174..5ca8bec9aa 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -316,7 +316,7 @@ endif
# {5: trace or no-trace}-{6: gcstress gcverify cms}-{7: forcecopy checkjni jni}-
# {8: no-image image}-{9: test name}{10: 32 or 64}
define define-test-art-run-test
- run_test_options := $(addprefix --runtime-option ,$(DALVIKVM_FLAGS))
+ run_test_options :=
prereq_rule :=
test_groups :=
uc_host_or_target :=