Merge "ART: Move overflow gap definition to art.go"
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index 55b4306..553928d 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -112,7 +112,7 @@
if (priority == ANDROID_LOG_FATAL) {
// Allocate buffer for snprintf(buf, buf_size, "%s:%u] %s", file, line, message) below.
// If allocation fails, fall back to printing only the message.
- buf_size = strlen(file) + 1 /* ':' */ + std::numeric_limits<typeof(line)>::max_digits10 +
+ buf_size = strlen(file) + 1 /* ':' */ + std::numeric_limits<decltype(line)>::max_digits10 +
2 /* "] " */ + strlen(message) + 1 /* terminating 0 */;
buf = reinterpret_cast<char*>(malloc(buf_size));
}
diff --git a/runtime/base/safe_copy_test.cc b/runtime/base/safe_copy_test.cc
index 987895e..a9ec952 100644
--- a/runtime/base/safe_copy_test.cc
+++ b/runtime/base/safe_copy_test.cc
@@ -23,80 +23,86 @@
#include <sys/mman.h>
#include <sys/user.h>
+#include "globals.h"
+
namespace art {
#if defined(__linux__)
TEST(SafeCopyTest, smoke) {
+ DCHECK_EQ(kPageSize, static_cast<decltype(kPageSize)>(PAGE_SIZE));
+
// Map four pages, mark the second one as PROT_NONE, unmap the last one.
- void* map = mmap(nullptr, PAGE_SIZE * 4, PROT_READ | PROT_WRITE,
+ void* map = mmap(nullptr, kPageSize * 4, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
ASSERT_NE(MAP_FAILED, map);
char* page1 = static_cast<char*>(map);
- char* page2 = page1 + PAGE_SIZE;
- char* page3 = page2 + PAGE_SIZE;
- char* page4 = page3 + PAGE_SIZE;
- ASSERT_EQ(0, mprotect(page1 + PAGE_SIZE, PAGE_SIZE, PROT_NONE));
- ASSERT_EQ(0, munmap(page4, PAGE_SIZE));
+ char* page2 = page1 + kPageSize;
+ char* page3 = page2 + kPageSize;
+ char* page4 = page3 + kPageSize;
+ ASSERT_EQ(0, mprotect(page1 + kPageSize, kPageSize, PROT_NONE));
+ ASSERT_EQ(0, munmap(page4, kPageSize));
page1[0] = 'a';
- page1[PAGE_SIZE - 1] = 'z';
+ page1[kPageSize - 1] = 'z';
page3[0] = 'b';
- page3[PAGE_SIZE - 1] = 'y';
+ page3[kPageSize - 1] = 'y';
- char buf[PAGE_SIZE];
+ char buf[kPageSize];
// Completely valid read.
memset(buf, 0xCC, sizeof(buf));
- EXPECT_EQ(static_cast<ssize_t>(PAGE_SIZE), SafeCopy(buf, page1, PAGE_SIZE)) << strerror(errno);
- EXPECT_EQ(0, memcmp(buf, page1, PAGE_SIZE));
+ EXPECT_EQ(static_cast<ssize_t>(kPageSize), SafeCopy(buf, page1, kPageSize)) << strerror(errno);
+ EXPECT_EQ(0, memcmp(buf, page1, kPageSize));
// Reading into a guard page.
memset(buf, 0xCC, sizeof(buf));
- EXPECT_EQ(static_cast<ssize_t>(PAGE_SIZE - 1), SafeCopy(buf, page1 + 1, PAGE_SIZE));
- EXPECT_EQ(0, memcmp(buf, page1 + 1, PAGE_SIZE - 1));
+ EXPECT_EQ(static_cast<ssize_t>(kPageSize - 1), SafeCopy(buf, page1 + 1, kPageSize));
+ EXPECT_EQ(0, memcmp(buf, page1 + 1, kPageSize - 1));
// Reading from a guard page into a real page.
memset(buf, 0xCC, sizeof(buf));
- EXPECT_EQ(0, SafeCopy(buf, page2 + PAGE_SIZE - 1, PAGE_SIZE));
+ EXPECT_EQ(0, SafeCopy(buf, page2 + kPageSize - 1, kPageSize));
// Reading off of the end of a mapping.
memset(buf, 0xCC, sizeof(buf));
- EXPECT_EQ(static_cast<ssize_t>(PAGE_SIZE), SafeCopy(buf, page3, PAGE_SIZE * 2));
- EXPECT_EQ(0, memcmp(buf, page3, PAGE_SIZE));
+ EXPECT_EQ(static_cast<ssize_t>(kPageSize), SafeCopy(buf, page3, kPageSize * 2));
+ EXPECT_EQ(0, memcmp(buf, page3, kPageSize));
// Completely invalid.
- EXPECT_EQ(0, SafeCopy(buf, page1 + PAGE_SIZE, PAGE_SIZE));
+ EXPECT_EQ(0, SafeCopy(buf, page1 + kPageSize, kPageSize));
// Clean up.
- ASSERT_EQ(0, munmap(map, PAGE_SIZE * 3));
+ ASSERT_EQ(0, munmap(map, kPageSize * 3));
}
TEST(SafeCopyTest, alignment) {
+ DCHECK_EQ(kPageSize, static_cast<decltype(kPageSize)>(PAGE_SIZE));
+
// Copy the middle of a mapping to the end of another one.
- void* src_map = mmap(nullptr, PAGE_SIZE * 3, PROT_READ | PROT_WRITE,
+ void* src_map = mmap(nullptr, kPageSize * 3, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
ASSERT_NE(MAP_FAILED, src_map);
// Add a guard page to make sure we don't write past the end of the mapping.
- void* dst_map = mmap(nullptr, PAGE_SIZE * 4, PROT_READ | PROT_WRITE,
+ void* dst_map = mmap(nullptr, kPageSize * 4, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
ASSERT_NE(MAP_FAILED, dst_map);
char* src = static_cast<char*>(src_map);
char* dst = static_cast<char*>(dst_map);
- ASSERT_EQ(0, mprotect(dst + 3 * PAGE_SIZE, PAGE_SIZE, PROT_NONE));
+ ASSERT_EQ(0, mprotect(dst + 3 * kPageSize, kPageSize, PROT_NONE));
src[512] = 'a';
- src[PAGE_SIZE * 3 - 512 - 1] = 'z';
+ src[kPageSize * 3 - 512 - 1] = 'z';
- EXPECT_EQ(static_cast<ssize_t>(PAGE_SIZE * 3 - 1024),
- SafeCopy(dst + 1024, src + 512, PAGE_SIZE * 3 - 1024));
- EXPECT_EQ(0, memcmp(dst + 1024, src + 512, PAGE_SIZE * 3 - 1024));
+ EXPECT_EQ(static_cast<ssize_t>(kPageSize * 3 - 1024),
+ SafeCopy(dst + 1024, src + 512, kPageSize * 3 - 1024));
+ EXPECT_EQ(0, memcmp(dst + 1024, src + 512, kPageSize * 3 - 1024));
- ASSERT_EQ(0, munmap(src_map, PAGE_SIZE * 3));
- ASSERT_EQ(0, munmap(dst_map, PAGE_SIZE * 4));
+ ASSERT_EQ(0, munmap(src_map, kPageSize * 3));
+ ASSERT_EQ(0, munmap(dst_map, kPageSize * 4));
}
#endif // defined(__linux__)
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index e2d45ac..74e7c18 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -145,22 +145,22 @@
V(A64Load, int64_t, volatile const int64_t *) \
V(A64Store, void, volatile int64_t *, int64_t) \
\
- V(NewEmptyString, void) \
- V(NewStringFromBytes_B, void) \
- V(NewStringFromBytes_BI, void) \
- V(NewStringFromBytes_BII, void) \
- V(NewStringFromBytes_BIII, void) \
- V(NewStringFromBytes_BIIString, void) \
- V(NewStringFromBytes_BString, void) \
- V(NewStringFromBytes_BIICharset, void) \
- V(NewStringFromBytes_BCharset, void) \
- V(NewStringFromChars_C, void) \
- V(NewStringFromChars_CII, void) \
- V(NewStringFromChars_IIC, void) \
- V(NewStringFromCodePoints, void) \
- V(NewStringFromString, void) \
- V(NewStringFromStringBuffer, void) \
- V(NewStringFromStringBuilder, void) \
+ V(NewEmptyString, void, void) \
+ V(NewStringFromBytes_B, void, void) \
+ V(NewStringFromBytes_BI, void, void) \
+ V(NewStringFromBytes_BII, void, void) \
+ V(NewStringFromBytes_BIII, void, void) \
+ V(NewStringFromBytes_BIIString, void, void) \
+ V(NewStringFromBytes_BString, void, void) \
+ V(NewStringFromBytes_BIICharset, void, void) \
+ V(NewStringFromBytes_BCharset, void, void) \
+ V(NewStringFromChars_C, void, void) \
+ V(NewStringFromChars_CII, void, void) \
+ V(NewStringFromChars_IIC, void, void) \
+ V(NewStringFromCodePoints, void, void) \
+ V(NewStringFromString, void, void) \
+ V(NewStringFromStringBuffer, void, void) \
+ V(NewStringFromStringBuilder, void, void) \
\
V(ReadBarrierJni, void, mirror::CompressedReference<mirror::Object>*, Thread*) \
V(ReadBarrierMarkReg00, mirror::Object*, mirror::Object*) \
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index ef4fa28..df097a0 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -4004,7 +4004,8 @@
native_blocking_gcs_finished_++;
native_blocking_gc_cond_->Broadcast(self);
}
- } else if (new_value > NativeAllocationGcWatermark() && !IsGCRequestPending()) {
+ } else if (new_value > NativeAllocationGcWatermark() * HeapGrowthMultiplier() &&
+ !IsGCRequestPending()) {
// Trigger another GC because there have been enough native bytes
// allocated since the last GC.
if (IsGcConcurrent()) {
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 2589ad0..fdc0505 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -140,12 +140,6 @@
result->SetJ(0);
return false;
} else {
- if (called_method->IsIntrinsic()) {
- if (MterpHandleIntrinsic(&shadow_frame, called_method, inst, inst_data,
- shadow_frame.GetResultRegister())) {
- return !self->IsExceptionPending();
- }
- }
jit::Jit* jit = Runtime::Current()->GetJit();
if (jit != nullptr) {
if (type == kVirtual) {
@@ -153,6 +147,12 @@
}
jit->AddSamples(self, sf_method, 1, /*with_backedges*/false);
}
+ if (called_method->IsIntrinsic()) {
+ if (MterpHandleIntrinsic(&shadow_frame, called_method, inst, inst_data,
+ shadow_frame.GetResultRegister())) {
+ return !self->IsExceptionPending();
+ }
+ }
return DoCall<false, false>(called_method, self, shadow_frame, inst, inst_data, result);
}
}
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index a53040c..5f94d04 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -276,6 +276,12 @@
vtable_idx, kRuntimePointerSize);
if ((called_method != nullptr) && called_method->IsIntrinsic()) {
if (MterpHandleIntrinsic(shadow_frame, called_method, inst, inst_data, result_register)) {
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr) {
+ jit->InvokeVirtualOrInterface(
+ receiver, shadow_frame->GetMethod(), shadow_frame->GetDexPC(), called_method);
+ jit->AddSamples(self, shadow_frame->GetMethod(), 1, /*with_backedges*/false);
+ }
return !self->IsExceptionPending();
}
}
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index d7527d5..6230ae9 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -582,7 +582,7 @@
// Primitive types are only assignable to themselves
const char* prims = "ZBCSIJFD";
- Class* prim_types[strlen(prims)];
+ std::vector<Class*> prim_types(strlen(prims));
for (size_t i = 0; i < strlen(prims); i++) {
prim_types[i] = class_linker_->FindPrimitiveClass(prims[i]);
}
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 483d255..4e143e0 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -681,6 +681,14 @@
deoptimization_counts_[static_cast<size_t>(kind)]++;
}
+ uint32_t GetNumberOfDeoptimizations() const {
+ uint32_t result = 0;
+ for (size_t i = 0; i <= static_cast<size_t>(DeoptimizationKind::kLast); ++i) {
+ result += deoptimization_counts_[i];
+ }
+ return result;
+ }
+
private:
static void InitPlatformSignalHandlers();
diff --git a/test/652-deopt-intrinsic/expected.txt b/test/652-deopt-intrinsic/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/652-deopt-intrinsic/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/652-deopt-intrinsic/info.txt b/test/652-deopt-intrinsic/info.txt
new file mode 100644
index 0000000..58a90fa
--- /dev/null
+++ b/test/652-deopt-intrinsic/info.txt
@@ -0,0 +1,2 @@
+Regression test for the interpreter/JIT, where the interpreter used to not
+record inline caches when seeing an intrinsic.
diff --git a/test/652-deopt-intrinsic/src/Main.java b/test/652-deopt-intrinsic/src/Main.java
new file mode 100644
index 0000000..a82580c
--- /dev/null
+++ b/test/652-deopt-intrinsic/src/Main.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ System.loadLibrary(args[0]);
+ loop();
+ ensureJitCompiled(Main.class, "$noinline$doCall");
+ loop();
+ }
+
+ public static void loop() {
+ Main m = new Main();
+ for (int i = 0; i < 5000; i++) {
+ $noinline$doCall("foo");
+ $noinline$doCall(m);
+ if (numberOfDeoptimizations() != 0) {
+ throw new Error("Unexpected deoptimizations");
+ }
+ }
+ }
+
+ public static boolean $noinline$doCall(Object foo) {
+ return foo.equals(Main.class);
+ }
+
+ public static native int numberOfDeoptimizations();
+ public static native void ensureJitCompiled(Class<?> cls, String methodName);
+}
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index b683a27..d2cfbff 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -238,4 +238,8 @@
return method->GetCounter();
}
+extern "C" JNIEXPORT jint JNICALL Java_Main_numberOfDeoptimizations(JNIEnv*, jclass) {
+ return Runtime::Current()->GetNumberOfDeoptimizations();
+}
+
} // namespace art
diff --git a/tools/ahat/README.txt b/tools/ahat/README.txt
index 133426f..3049871 100644
--- a/tools/ahat/README.txt
+++ b/tools/ahat/README.txt
@@ -75,6 +75,9 @@
* Instance.isRoot and Instance.getRootTypes.
Release History:
+ 1.2 Pending
+ Simplify presentation of sample path from gc root.
+
1.1 Feb 21, 2017
Show java.lang.ref.Reference referents as "unreachable" instead of null.
diff --git a/tools/ahat/src/ObjectHandler.java b/tools/ahat/src/ObjectHandler.java
index 2e0ae6e..b1d7904 100644
--- a/tools/ahat/src/ObjectHandler.java
+++ b/tools/ahat/src/ObjectHandler.java
@@ -19,7 +19,6 @@
import com.android.ahat.heapdump.AhatArrayInstance;
import com.android.ahat.heapdump.AhatClassInstance;
import com.android.ahat.heapdump.AhatClassObj;
-import com.android.ahat.heapdump.AhatHeap;
import com.android.ahat.heapdump.AhatInstance;
import com.android.ahat.heapdump.AhatSnapshot;
import com.android.ahat.heapdump.Diff;
@@ -29,7 +28,6 @@
import com.android.ahat.heapdump.Value;
import java.io.IOException;
import java.util.Collection;
-import java.util.Collections;
import java.util.List;
import java.util.Objects;
@@ -249,47 +247,16 @@
private void printGcRootPath(Doc doc, Query query, AhatInstance inst) {
doc.section("Sample Path from GC Root");
List<PathElement> path = inst.getPathFromGcRoot();
-
- // Add a dummy PathElement as a marker for the root.
- final PathElement root = new PathElement(null, null);
- path.add(0, root);
-
- HeapTable.TableConfig<PathElement> table = new HeapTable.TableConfig<PathElement>() {
- public String getHeapsDescription() {
- return "Bytes Retained by Heap (Dominators Only)";
- }
-
- public long getSize(PathElement element, AhatHeap heap) {
- if (element == root) {
- return heap.getSize();
- }
- if (element.isDominator) {
- return element.instance.getRetainedSize(heap);
- }
- return 0;
- }
-
- public List<HeapTable.ValueConfig<PathElement>> getValueConfigs() {
- HeapTable.ValueConfig<PathElement> value = new HeapTable.ValueConfig<PathElement>() {
- public String getDescription() {
- return "Path Element";
- }
-
- public DocString render(PathElement element) {
- if (element == root) {
- return DocString.link(DocString.uri("rooted"), DocString.text("ROOT"));
- } else {
- DocString label = DocString.text("→ ");
- label.append(Summarizer.summarize(element.instance));
- label.append(element.field);
- return label;
- }
- }
- };
- return Collections.singletonList(value);
- }
+ doc.table(new Column(""), new Column("Path Element"));
+ doc.row(DocString.text("(rooted)"),
+ DocString.link(DocString.uri("rooted"), DocString.text("ROOT")));
+ for (PathElement element : path) {
+ DocString label = DocString.text("→ ");
+ label.append(Summarizer.summarize(element.instance));
+ label.append(element.field);
+ doc.row(DocString.text(element.isDominator ? "(dominator)" : ""), label);
};
- HeapTable.render(doc, query, DOMINATOR_PATH_ID, table, mSnapshot, path);
+ doc.end();
}
public void printDominatedObjects(Doc doc, Query query, AhatInstance inst) {