Revert "JIT root tables."
This may be the offender for the jit-gcstress failure of 902.
This reverts commit ac3ebc3150760425ed00abd56da48f9a6e0666bc.
Change-Id: I9ea6c9236fd1729fed7d1868dd8a111172932308
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index a5f248d..8b450e1 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1375,30 +1375,4 @@
return klass->GetDisableIntrinsicFlagOffset().Uint32Value();
}
-void CodeGenerator::EmitJitRoots(uint8_t* code,
- Handle<mirror::ObjectArray<mirror::Object>> roots,
- const uint8_t* roots_data,
- Handle<mirror::DexCache> outer_dex_cache) {
- DCHECK_EQ(static_cast<size_t>(roots->GetLength()), GetNumberOfJitRoots());
- StackHandleScope<1> hs(Thread::Current());
- MutableHandle<mirror::DexCache> h_dex_cache(hs.NewHandle<mirror::DexCache>(nullptr));
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- size_t index = 0;
- for (auto& entry : jit_string_roots_) {
- const DexFile& entry_dex_file = *entry.first.dex_file;
- // Avoid the expensive FindDexCache call by checking if the string is
- // in the compiled method's dex file.
- h_dex_cache.Assign(IsSameDexFile(*outer_dex_cache->GetDexFile(), entry_dex_file)
- ? outer_dex_cache.Get()
- : class_linker->FindDexCache(hs.Self(), entry_dex_file));
- mirror::String* string = class_linker->LookupString(
- entry_dex_file, entry.first.string_index, h_dex_cache);
- DCHECK(string != nullptr) << "JIT roots require strings to have been loaded";
- roots->Set(index, string);
- entry.second = index;
- ++index;
- }
- EmitJitRootPatches(code, roots_data);
-}
-
} // namespace art
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 212d571..a81f24e 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -31,7 +31,6 @@
#include "nodes.h"
#include "optimizing_compiler_stats.h"
#include "stack_map_stream.h"
-#include "string_reference.h"
#include "utils/label.h"
namespace art {
@@ -332,17 +331,6 @@
void BuildStackMaps(MemoryRegion region, const DexFile::CodeItem& code_item);
size_t ComputeStackMapsSize();
- size_t GetNumberOfJitRoots() const {
- return jit_string_roots_.size();
- }
-
- // Fills the `roots` array with the literals collected during code generation.
- // Also emits literal patches.
- void EmitJitRoots(uint8_t* code,
- Handle<mirror::ObjectArray<mirror::Object>> roots,
- const uint8_t* roots_data,
- Handle<mirror::DexCache> outer_dex_cache)
- REQUIRES_SHARED(Locks::mutator_lock_);
bool IsLeafMethod() const {
return is_leaf_;
@@ -579,8 +567,6 @@
fpu_callee_save_mask_(fpu_callee_save_mask),
stack_map_stream_(graph->GetArena()),
block_order_(nullptr),
- jit_string_roots_(StringReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
disasm_info_(nullptr),
stats_(stats),
graph_(graph),
@@ -647,12 +633,6 @@
return current_slow_path_;
}
- // Emit the patches associated with JIT roots. Only applies to JIT compiled code.
- virtual void EmitJitRootPatches(uint8_t* code ATTRIBUTE_UNUSED,
- const uint8_t* roots_data ATTRIBUTE_UNUSED) {
- DCHECK_EQ(jit_string_roots_.size(), 0u);
- }
-
// Frame size required for this method.
uint32_t frame_size_;
uint32_t core_spill_mask_;
@@ -678,11 +658,6 @@
// The order to use for code generation.
const ArenaVector<HBasicBlock*>* block_order_;
- // Maps a StringReference (dex_file, string_index) to the index in the literal table.
- // Entries are initially added with a 0 index, and `EmitJitRoots` will compute all the
- // indices.
- ArenaSafeMap<StringReference, size_t, StringReferenceValueComparator> jit_string_roots_;
-
DisassemblyInformation* disasm_info_;
private:
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 90ec65b..57823c9 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -5877,9 +5877,6 @@
case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
- case HLoadString::LoadKind::kJitTableAddress:
- DCHECK(Runtime::Current()->UseJitCompilation());
- return HLoadString::LoadKind::kDexCacheViaMethod;
case HLoadString::LoadKind::kDexCacheViaMethod:
break;
}
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 1aafecd..b411a43 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4422,9 +4422,6 @@
break;
case HLoadString::LoadKind::kDexCacheViaMethod:
break;
- case HLoadString::LoadKind::kJitTableAddress:
- DCHECK(Runtime::Current()->UseJitCompilation());
- return HLoadString::LoadKind::kDexCacheViaMethod;
}
return desired_string_load_kind;
}
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index eedd4d5..12b1ab9 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -5204,11 +5204,6 @@
case HLoadString::LoadKind::kDexCacheViaMethod:
fallback_load = false;
break;
- case HLoadString::LoadKind::kJitTableAddress:
- DCHECK(Runtime::Current()->UseJitCompilation());
- // TODO: implement.
- fallback_load = true;
- break;
}
if (fallback_load) {
desired_string_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 9ac8276..2f946e4 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -6217,9 +6217,6 @@
break;
case HLoadString::LoadKind::kDexCacheViaMethod:
break;
- case HLoadString::LoadKind::kJitTableAddress:
- DCHECK(Runtime::Current()->UseJitCompilation());
- return HLoadString::LoadKind::kDexCacheViaMethod;
}
return desired_string_load_kind;
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 117a43d..232c3b3 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1263,8 +1263,7 @@
simple_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
}
@@ -5633,9 +5632,6 @@
break;
case HLoadString::LoadKind::kDexCacheViaMethod:
break;
- case HLoadString::LoadKind::kJitTableAddress:
- DCHECK(Runtime::Current()->UseJitCompilation());
- break;
}
return desired_string_load_kind;
}
@@ -5665,14 +5661,6 @@
}
}
-Label* CodeGeneratorX86_64::NewJitRootStringPatch(const DexFile& dex_file, uint32_t dex_index) {
- jit_string_roots_.Overwrite(StringReference(&dex_file, dex_index), /* placeholder */ 0u);
- // Add a patch entry and return the label.
- jit_string_patches_.emplace_back(dex_file, dex_index);
- PatchInfo<Label>* info = &jit_string_patches_.back();
- return &info->label;
-}
-
void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) {
LocationSummary* locations = load->GetLocations();
Location out_loc = locations->Out();
@@ -5704,15 +5692,6 @@
__ Bind(slow_path->GetExitLabel());
return;
}
- case HLoadString::LoadKind::kJitTableAddress: {
- Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
- /* no_rip */ true);
- Label* fixup_label =
- codegen_->NewJitRootStringPatch(load->GetDexFile(), load->GetStringIndex());
- // /* GcRoot<mirror::String> */ out = *address
- GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kEmitCompilerReadBarrier);
- return;
- }
default:
break;
}
@@ -7150,20 +7129,6 @@
}
}
-void CodeGeneratorX86_64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
- for (const PatchInfo<Label>& info : jit_string_patches_) {
- const auto& it = jit_string_roots_.find(StringReference(&info.dex_file, info.index));
- DCHECK(it != jit_string_roots_.end());
- size_t index_in_table = it->second;
- uint32_t code_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
- uintptr_t address =
- reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
- typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
- reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] =
- dchecked_integral_cast<uint32_t>(address);
- }
-}
-
#undef __
} // namespace x86_64
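For reference, a minimal standalone sketch of the unaligned-store idiom the removed EmitJitRootPatches relied on to write a root-table address into compiled code; PatchUint32 and its parameters are illustrative names, not ART APIs:

  #include <cstddef>
  #include <cstdint>

  // Lowering the type's alignment to 1 tells the compiler the pointer may
  // sit at any byte offset, so it emits a store that is legal there (a
  // plain mov on x86-64, which tolerates unaligned accesses).
  typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;

  inline void PatchUint32(uint8_t* code, size_t code_offset, uint32_t value) {
    // Same pattern as the removed EmitJitRootPatches: reinterpret the patch
    // site inside the code buffer and store the 32-bit immediate directly.
    reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] = value;
  }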
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index bc78b8c..5a6dc54 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -411,14 +411,11 @@
void RecordTypePatch(HLoadClass* load_class);
Label* NewStringBssEntryPatch(HLoadString* load_string);
Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset);
- Label* NewJitRootStringPatch(const DexFile& dex_file, uint32_t dex_index);
void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
- void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
-
const X86_64InstructionSetFeatures& GetInstructionSetFeatures() const {
return isa_features_;
}
@@ -604,9 +601,6 @@
// Fixups for jump tables need to be handled specially.
ArenaVector<JumpTableRIPFixup*> fixups_to_jump_tables_;
- // Patches for string literals in JIT compiled code.
- ArenaDeque<PatchInfo<Label>> jit_string_patches_;
-
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorX86_64);
};
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index a946e31..ce2edde 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -5692,10 +5692,7 @@
// all other types are unavailable.
kDexCacheViaMethod,
- // Load from the root table associated with the JIT compiled method.
- kJitTableAddress,
-
- kLast = kJitTableAddress,
+ kLast = kDexCacheViaMethod
};
HLoadString(HCurrentMethod* current_method,
@@ -5753,8 +5750,7 @@
LoadKind load_kind = GetLoadKind();
if (load_kind == LoadKind::kBootImageLinkTimeAddress ||
load_kind == LoadKind::kBootImageLinkTimePcRelative ||
- load_kind == LoadKind::kBootImageAddress ||
- load_kind == LoadKind::kJitTableAddress) {
+ load_kind == LoadKind::kBootImageAddress) {
return false;
}
return !IsInDexCache();
@@ -5807,8 +5803,7 @@
return load_kind == LoadKind::kBootImageLinkTimeAddress ||
load_kind == LoadKind::kBootImageLinkTimePcRelative ||
load_kind == LoadKind::kBssEntry ||
- load_kind == LoadKind::kDexCacheViaMethod ||
- load_kind == LoadKind::kJitTableAddress;
+ load_kind == LoadKind::kDexCacheViaMethod;
}
static bool HasAddress(LoadKind load_kind) {
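A note on why `kLast` moves back to kDexCacheViaMethod rather than simply being dropped: the load kind is packed into a minimal-width bit field sized from kLast, so kLast must always alias the final enumerator. A hedged sketch of that convention, with the enumerator list abbreviated:

  #include <cstddef>

  enum class LoadKind {
    kBootImageAddress,
    kBssEntry,
    kDexCacheViaMethod,
    kLast = kDexCacheViaMethod  // Must track the final enumerator.
  };

  // Sizing a packed field from kLast keeps it exactly wide enough.
  constexpr size_t kNumberOfLoadKinds =
      static_cast<size_t>(LoadKind::kLast) + 1u;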
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 85f6871..a484760 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -117,7 +117,6 @@
size_t GetSize() const { return size_; }
const ArenaVector<uint8_t>& GetMemory() const { return memory_; }
- uint8_t* GetData() { return memory_.data(); }
private:
ArenaVector<uint8_t> memory_;
@@ -1127,7 +1126,7 @@
jit::JitCodeCache* code_cache,
ArtMethod* method,
bool osr) {
- StackHandleScope<3> hs(self);
+ StackHandleScope<2> hs(self);
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
method->GetDeclaringClass()->GetClassLoader()));
Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
@@ -1173,43 +1172,22 @@
}
size_t stack_map_size = codegen->ComputeStackMapsSize();
- size_t number_of_roots = codegen->GetNumberOfJitRoots();
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- // We allocate an object array to ensure the JIT roots that we will collect in EmitJitRoots
- // will be visible to the GC between EmitLiterals and CommitCode. Once CommitCode is
- // executed, this array is not needed.
- Handle<mirror::ObjectArray<mirror::Object>> roots(
- hs.NewHandle(mirror::ObjectArray<mirror::Object>::Alloc(
- self, class_linker->GetClassRoot(ClassLinker::kObjectArrayClass), number_of_roots)));
- if (roots.Get() == nullptr) {
- // Out of memory; just clear the exception to avoid problems with an uncaught Java exception.
- DCHECK(self->IsExceptionPending());
- self->ClearException();
- return false;
- }
- uint8_t* stack_map_data = nullptr;
- uint8_t* roots_data = nullptr;
- code_cache->ReserveData(
- self, stack_map_size, number_of_roots, method, &stack_map_data, &roots_data);
- if (stack_map_data == nullptr || roots_data == nullptr) {
+ uint8_t* stack_map_data = code_cache->ReserveData(self, stack_map_size, method);
+ if (stack_map_data == nullptr) {
return false;
}
MaybeRecordStat(MethodCompilationStat::kCompiled);
codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size), *code_item);
- codegen->EmitJitRoots(code_allocator.GetData(), roots, roots_data, dex_cache);
-
const void* code = code_cache->CommitCode(
self,
method,
stack_map_data,
- roots_data,
codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
codegen->GetCoreSpillMask(),
codegen->GetFpuSpillMask(),
code_allocator.GetMemory().data(),
code_allocator.GetSize(),
- osr,
- roots);
+ osr);
if (code == nullptr) {
code_cache->ClearData(self, stack_map_data);
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 13e4494..fd1db59 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -281,8 +281,7 @@
: hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file));
if (codegen_->GetCompilerOptions().IsBootImage()) {
- // Compiling boot image. Resolve the string and allocate it if needed, to ensure
- // the string will be added to the boot image.
+ // Compiling boot image. Resolve the string and allocate it if needed.
DCHECK(!runtime->UseJitCompilation());
mirror::String* string = class_linker->ResolveString(dex_file, string_index, dex_cache);
CHECK(string != nullptr);
@@ -298,14 +297,10 @@
} else if (runtime->UseJitCompilation()) {
// TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
// DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
- mirror::String* string = class_linker->LookupString(dex_file, string_index, dex_cache);
- if (string != nullptr) {
- if (runtime->GetHeap()->ObjectIsInBootImageSpace(string)) {
- desired_load_kind = HLoadString::LoadKind::kBootImageAddress;
- address = reinterpret_cast64<uint64_t>(string);
- } else {
- desired_load_kind = HLoadString::LoadKind::kJitTableAddress;
- }
+ mirror::String* string = dex_cache->GetResolvedString(string_index);
+ if (string != nullptr && runtime->GetHeap()->ObjectIsInBootImageSpace(string)) {
+ desired_load_kind = HLoadString::LoadKind::kBootImageAddress;
+ address = reinterpret_cast64<uint64_t>(string);
}
} else {
// AOT app compilation. Try to lookup the string without allocating if not found.
@@ -327,7 +322,6 @@
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
case HLoadString::LoadKind::kBssEntry:
case HLoadString::LoadKind::kDexCacheViaMethod:
- case HLoadString::LoadKind::kJitTableAddress:
load_string->SetLoadKindWithStringReference(load_kind, dex_file, string_index);
break;
case HLoadString::LoadKind::kBootImageAddress:
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 2fbf5ef..a26d850 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -80,18 +80,8 @@
std::string error_str;
// Map name specific for android_os_Debug.cpp accounting.
- // Map in low 4gb to simplify accessing root tables for x86_64.
- // We could do PC-relative addressing to avoid this problem, but that
- // would require reserving code and data area before submitting, which
- // means more windows for the code memory to be RWX.
MemMap* data_map = MemMap::MapAnonymous(
- "data-code-cache", nullptr,
- max_capacity,
- kProtAll,
- /* low_4gb */ true,
- /* reuse */ false,
- &error_str,
- use_ashmem);
+ "data-code-cache", nullptr, max_capacity, kProtAll, false, false, &error_str, use_ashmem);
if (data_map == nullptr) {
std::ostringstream oss;
oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
@@ -207,40 +197,34 @@
uint8_t* JitCodeCache::CommitCode(Thread* self,
ArtMethod* method,
- uint8_t* stack_map,
- uint8_t* roots_data,
+ const uint8_t* vmap_table,
size_t frame_size_in_bytes,
size_t core_spill_mask,
size_t fp_spill_mask,
const uint8_t* code,
size_t code_size,
- bool osr,
- Handle<mirror::ObjectArray<mirror::Object>> roots) {
+ bool osr) {
uint8_t* result = CommitCodeInternal(self,
method,
- stack_map,
- roots_data,
+ vmap_table,
frame_size_in_bytes,
core_spill_mask,
fp_spill_mask,
code,
code_size,
- osr,
- roots);
+ osr);
if (result == nullptr) {
// Retry.
GarbageCollectCache(self);
result = CommitCodeInternal(self,
method,
- stack_map,
- roots_data,
+ vmap_table,
frame_size_in_bytes,
core_spill_mask,
fp_spill_mask,
code,
code_size,
- osr,
- roots);
+ osr);
}
return result;
}
@@ -259,66 +243,20 @@
return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
}
-static uint32_t ComputeRootTableSize(uint32_t number_of_roots) {
- return sizeof(uint32_t) + number_of_roots * sizeof(GcRoot<mirror::Object>);
-}
-
-static uint32_t GetNumberOfRoots(const uint8_t* stack_map) {
- // The length of the table is stored just before the stack map (and therefore at the end of
- // the table itself), in order to be able to fetch it from a `stack_map` pointer.
- return reinterpret_cast<const uint32_t*>(stack_map)[-1];
-}
-
-static void FillRootTable(uint8_t* roots_data, Handle<mirror::ObjectArray<mirror::Object>> roots)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- GcRoot<mirror::Object>* gc_roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
- uint32_t length = roots->GetLength();
- // Put all roots in `roots_data`.
- for (uint32_t i = 0; i < length; ++i) {
- gc_roots[i] = GcRoot<mirror::Object>(roots->Get(i));
- }
- // Store the length of the table at the end. This will allow fetching it from a `stack_map`
- // pointer.
- reinterpret_cast<uint32_t*>(gc_roots + length)[0] = length;
-}
-
-static uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = nullptr) {
- OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
- uint8_t* data = method_header->GetOptimizedCodeInfoPtr();
- uint32_t roots = GetNumberOfRoots(data);
- if (number_of_roots != nullptr) {
- *number_of_roots = roots;
- }
- return data - ComputeRootTableSize(roots);
-}
-
-void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
- MutexLock mu(Thread::Current(), lock_);
- for (const auto& entry : method_code_map_) {
- uint32_t number_of_roots = 0;
- uint8_t* roots_data = GetRootTable(entry.first, &number_of_roots);
- GcRoot<mirror::Object>* roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
- for (uint32_t i = 0; i < number_of_roots; ++i) {
- // This does not need a read barrier because this is called by GC.
- mirror::Object* object = roots[i].Read<kWithoutReadBarrier>();
- DCHECK(object->IsString());
- mirror::Object* new_string = visitor->IsMarked(object);
- // We know the string is marked because it's a strongly-interned string that
- // is always alive.
- // TODO: Do not use IsMarked for j.l.Class, and adjust once we move this method
- // out of the weak access/creation pause. b/32167580
- DCHECK(new_string != nullptr);
- roots[i] = GcRoot<mirror::Object>(new_string);
- }
- }
-}
-
void JitCodeCache::FreeCode(const void* code_ptr, ArtMethod* method ATTRIBUTE_UNUSED) {
uintptr_t allocation = FromCodeToAllocation(code_ptr);
+ const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
// Notify native debugger that we are about to remove the code.
// It does nothing if we are not using native debugger.
DeleteJITCodeEntryForAddress(reinterpret_cast<uintptr_t>(code_ptr));
- FreeData(GetRootTable(code_ptr));
+
+ // Use the offset directly to bypass the sanity check that the method was
+ // compiled with the optimizing compiler.
+ // TODO(ngeoffray): Clean up.
+ if (method_header->vmap_table_offset_ != 0) {
+ const uint8_t* data = method_header->code_ - method_header->vmap_table_offset_;
+ FreeData(const_cast<uint8_t*>(data));
+ }
FreeCode(reinterpret_cast<uint8_t*>(allocation));
}
@@ -370,16 +308,13 @@
uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
ArtMethod* method,
- uint8_t* stack_map,
- uint8_t* roots_data,
+ const uint8_t* vmap_table,
size_t frame_size_in_bytes,
size_t core_spill_mask,
size_t fp_spill_mask,
const uint8_t* code,
size_t code_size,
- bool osr,
- Handle<mirror::ObjectArray<mirror::Object>> roots) {
- DCHECK(stack_map != nullptr);
+ bool osr) {
size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
// Ensure the header ends up at expected instruction alignment.
size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
@@ -403,7 +338,7 @@
std::copy(code, code + code_size, code_ptr);
method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
new (method_header) OatQuickMethodHeader(
- code_ptr - stack_map,
+ (vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
frame_size_in_bytes,
core_spill_mask,
fp_spill_mask,
@@ -418,8 +353,6 @@
{
MutexLock mu(self, lock_);
method_code_map_.Put(code_ptr, method);
- // Fill the root table before updating the entry point.
- FillRootTable(roots_data, roots);
if (osr) {
number_of_osr_compilations_++;
osr_code_map_.Put(method, code_ptr);
@@ -475,14 +408,8 @@
FreeData(reinterpret_cast<uint8_t*>(data));
}
-void JitCodeCache::ReserveData(Thread* self,
- size_t stack_map_size,
- size_t number_of_roots,
- ArtMethod* method,
- uint8_t** stack_map_data,
- uint8_t** roots_data) {
- size_t table_size = ComputeRootTableSize(number_of_roots);
- size_t size = RoundUp(stack_map_size + table_size, sizeof(void*));
+uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size, ArtMethod* method) {
+ size = RoundUp(size, sizeof(void*));
uint8_t* result = nullptr;
{
@@ -509,8 +436,7 @@
<< " for stack maps of "
<< ArtMethod::PrettyMethod(method);
}
- *roots_data = result;
- *stack_map_data = result + table_size;
+ return result;
}
class MarkCodeVisitor FINAL : public StackVisitor {
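For context on the helpers deleted above (ComputeRootTableSize, GetNumberOfRoots, GetRootTable): the reverted change placed the root table directly in front of the stack maps, storing the length word last so it could be recovered from the stack-map pointer alone. A minimal sketch of that layout, with FakeGcRoot standing in for art::GcRoot<mirror::Object>:

  #include <cstdint>
  #include <cstring>

  //   roots_data                             stack_map_data
  //   |                                      |
  //   [root 0][root 1] ... [root N-1][ N ]   [stack maps ...]

  struct FakeGcRoot { uint32_t compressed_reference; };

  inline uint32_t ComputeRootTableSize(uint32_t number_of_roots) {
    // One length word plus one slot per root, as in the removed helper.
    return sizeof(uint32_t) + number_of_roots * sizeof(FakeGcRoot);
  }

  inline uint32_t GetNumberOfRoots(const uint8_t* stack_map) {
    // The length word sits immediately before the stack maps; memcpy avoids
    // assuming the pointer is 4-byte aligned.
    uint32_t length;
    std::memcpy(&length, stack_map - sizeof(uint32_t), sizeof(length));
    return length;
  }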
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index a97ef68..e15c93a 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -92,15 +92,13 @@
// Allocate and write code and its metadata to the code cache.
uint8_t* CommitCode(Thread* self,
ArtMethod* method,
- uint8_t* stack_map,
- uint8_t* roots_data,
+ const uint8_t* vmap_table,
size_t frame_size_in_bytes,
size_t core_spill_mask,
size_t fp_spill_mask,
const uint8_t* code,
size_t code_size,
- bool osr,
- Handle<mirror::ObjectArray<mirror::Object>> roots)
+ bool osr)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!lock_);
@@ -110,14 +108,8 @@
// Return true if the code cache contains this method.
bool ContainsMethod(ArtMethod* method) REQUIRES(!lock_);
- // Allocate a region of data that contains `size` bytes, and potentially space
- // for storing `number_of_roots` roots. Returns null if there is no more room.
- void ReserveData(Thread* self,
- size_t size,
- size_t number_of_roots,
- ArtMethod* method,
- uint8_t** stack_map_data,
- uint8_t** roots_data)
+ // Reserve a data region of at least "size" bytes. Returns null if there is no more room.
+ uint8_t* ReserveData(Thread* self, size_t size, ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!lock_);
@@ -196,10 +188,6 @@
bool IsOsrCompiled(ArtMethod* method) REQUIRES(!lock_);
- void SweepRootTables(IsMarkedVisitor* visitor)
- REQUIRES(!lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
private:
// Take ownership of maps.
JitCodeCache(MemMap* code_map,
@@ -213,15 +201,13 @@
// allocation fails. Return null if the allocation fails.
uint8_t* CommitCodeInternal(Thread* self,
ArtMethod* method,
- uint8_t* stack_map,
- uint8_t* roots_data,
+ const uint8_t* vmap_table,
size_t frame_size_in_bytes,
size_t core_spill_mask,
size_t fp_spill_mask,
const uint8_t* code,
size_t code_size,
- bool osr,
- Handle<mirror::ObjectArray<mirror::Object>> roots)
+ bool osr)
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index 4afca7d..ee5002f 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -67,11 +67,6 @@
return data;
}
- uint8_t* GetOptimizedCodeInfoPtr() {
- DCHECK(IsOptimized());
- return code_ - vmap_table_offset_;
- }
-
CodeInfo GetOptimizedCodeInfo() const {
return CodeInfo(GetOptimizedCodeInfoPtr());
}
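The deleted accessor depended on the existing OatQuickMethodHeader convention that vmap_table_offset_ is a backwards offset from the method's code to its metadata, the same arithmetic the FreeCode hunk above restores inline. A hedged sketch (FakeMethodHeader is illustrative, not the real header layout):

  #include <cstdint>

  struct FakeMethodHeader {
    uint32_t vmap_table_offset_;  // Distance back from code_ to the metadata.
    const uint8_t* code_;         // Start of the compiled code.

    const uint8_t* GetMetadataPtr() const {
      // Mirrors the removed GetOptimizedCodeInfoPtr: the vmap table (and,
      // before this revert, the JIT root table) precedes the code.
      return code_ - vmap_table_offset_;
    }
  };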
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 3431ea2..b868563 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -81,7 +81,6 @@
#include "intern_table.h"
#include "interpreter/interpreter.h"
#include "jit/jit.h"
-#include "jit/jit_code_cache.h"
#include "jni_internal.h"
#include "linear_alloc.h"
#include "mirror/array.h"
@@ -492,14 +491,6 @@
GetMonitorList()->SweepMonitorList(visitor);
GetJavaVM()->SweepJniWeakGlobals(visitor);
GetHeap()->SweepAllocationRecords(visitor);
- if (GetJit() != nullptr) {
- // Visit JIT literal tables. Objects in these tables are classes and strings
- // and only classes can be affected by class unloading. The strings always
- // stay alive as they are strongly interned.
- // TODO: Move this closer to CleanupClassLoaders, to avoid blocking weak accesses
- // from mutators. See b/32167580.
- GetJit()->GetCodeCache()->SweepRootTables(visitor);
- }
// All other generic system-weak holders.
for (gc::AbstractSystemWeakHolder* holder : system_weak_holders_) {